Columns: repo_name (string, 8-130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence)
gunho1123/models
[ "a5d63cd3c28e30476ea53f1a5c3d13370926054d" ]
[ "official/projects/yolact/modeling/yolact_model.py" ]
[ "# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Build simclr models.\"\"\"\nfrom typing import List, Optional, Mapping\nfrom absl import logging\n\nimport tensorflow as tf\n\nfrom official.vision.beta.ops import anchor\n\nlayers = tf.keras.layers\n\n\[email protected]_keras_serializable(package='yolact')\nclass YolactModel(tf.keras.Model):\n \"\"\"A classification model based on SimCLR framework.\"\"\"\n\n def __init__(self,\n backbone: tf.keras.models.Model,\n decoder: tf.keras.models.Model,\n prediction_head: tf.keras.layers.Layer,\n protonet: tf.keras.layers.Layer,\n detection_generator: tf.keras.layers.Layer,\n min_level: int,\n max_level: int,\n num_scales: int,\n aspect_ratios: List[float],\n anchor_size: float,\n **kwargs):\n \"\"\"A classification model based on SimCLR framework.\n Args:\n backbone: a backbone network.\n input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.\n mode: `str` indicates mode of training to be executed.\n backbone_trainable: `bool` whether the backbone is trainable or not.\n **kwargs: keyword arguments to be passed.\n \"\"\"\n super(YolactModel, self).__init__(**kwargs)\n self._config_dict = {\n 'backbone': backbone,\n 'decoder': decoder,\n 'prediction_head': prediction_head,\n 'protonet': protonet,\n 'detection_generator': detection_generator,\n 'min_level': min_level,\n 'max_level': max_level,\n 'num_scales': num_scales,\n 'aspect_ratios': aspect_ratios,\n 'anchor_size': anchor_size,\n }\n self.backbone = backbone\n self.decoder = decoder\n self.prediction_head = prediction_head\n self.protonet = protonet\n self.detection_generator = detection_generator\n\n def call(self,\n inputs: tf.Tensor,\n image_shape: Optional[tf.Tensor] = None,\n anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,\n training=None,\n **kwargs):\n \"\"\"Forward pass of the YOLACT model.\n\n Args:\n images: `Tensor`, the input batched images, whose shape is\n [batch, height, width, 3].\n image_shape: `Tensor`, the actual shape of the input images, whose shape\n is [batch, 2] where the last dimension is [height, width]. Note that\n this is the actual image shape excluding paddings. 
For example, images\n in the batch may be resized into different shapes before padding to the\n fixed size.\n anchor_boxes: a dict of tensors which includes multilevel anchors.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the anchor coordinates of a particular feature\n level, whose shape is [height_l, width_l, num_anchors_per_location].\n training: `bool`, indicating whether it is in training mode.\n\n Returns:\n scores: a dict of tensors which includes scores of the predictions.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the box scores predicted from a particular feature\n level, whose shape is\n [batch, height_l, width_l, num_classes * num_anchors_per_location].\n boxes: a dict of tensors which includes coordinates of the predictions.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the box coordinates predicted from a particular\n feature level, whose shape is\n [batch, height_l, width_l, 4 * num_anchors_per_location].\n masks: a dict of tensors which includes mask coefficients of the predictions.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the mask coefficients predicted from a particular\n feature level, whose shape is\n [batch, height_l, width_l, k * num_anchors_per_location].\n protonet_features: `Tensor`, the protonet features, whose shape is\n [batch, height_2, width_2, k].\n \"\"\"\n outputs = {}\n backbone_features = self.backbone(inputs)\n decoder_features = self.decoder(backbone_features)\n levels = sorted(decoder_features.keys()) # Ascending order\n\n raw_scores, raw_boxes, raw_masks = self.prediction_head(decoder_features)\n protonet_features = self.protonet(decoder_features[levels[0]])\n\n if training:\n outputs.update({\n 'cls_outputs': raw_scores,\n 'box_outputs': raw_boxes,\n 'mask_outputs': raw_masks,\n 'protonet_features': protonet_features,\n })\n else:\n # Generate anchor boxes for this batch if not provided.\n if anchor_boxes is None:\n _, image_height, image_width, _ = inputs.get_shape().as_list()\n anchor_boxes = anchor.Anchor(\n min_level=self._config_dict['min_level'],\n max_level=self._config_dict['max_level'],\n num_scales=self._config_dict['num_scales'],\n aspect_ratios=self._config_dict['aspect_ratios'],\n anchor_size=self._config_dict['anchor_size'],\n image_size=(image_height, image_width)).multilevel_boxes\n for l in anchor_boxes:\n anchor_boxes[l] = tf.tile(\n tf.expand_dims(anchor_boxes[l], axis=0),\n [tf.shape(inputs)[0], 1, 1, 1])\n # Post-processing.\n raw_attributes = {\n 'raw_masks' : raw_masks,\n }\n final_results = self.detection_generator(raw_boxes, raw_scores,\n anchor_boxes, image_shape,\n raw_attributes)\n\n outputs.update({\n 'cls_outputs': raw_scores,\n 'box_outputs': raw_boxes,\n 'mask_outputs': raw_masks,\n 'protonet_features': protonet_features,\n })\n\n outputs.update({\n 'detection_boxes': final_results['detection_boxes'],\n 'detection_scores': final_results['detection_scores'],\n 'detection_classes': final_results['detection_classes'],\n 'num_detections': final_results['num_detections']\n })\n\n final_mask_coefficients = final_results['detection_attributes']['raw_masks']\n batch, proto_height, proto_width, proto_channel = protonet_features.get_shape().as_list()\n protonet_features = tf.reshape(protonet_features, [batch, -1, proto_channel]) # [batch, H*W, 32]\n assembled_masks = tf.matmul(\n a=protonet_features, # [batch, proto_height*proto_width, proto_channel]\n b=final_mask_coefficients, # [batch, 
max_num_instances, 32]\n transpose_b=True)\n assembled_masks = tf.reshape(assembled_masks, [batch, proto_height, proto_width, -1])\n assembled_masks = tf.transpose(assembled_masks, perm=[0,3,1,2])\n # [batch, max_num_instances, proto_height, proto_width]\n\n outputs['detection_masks'] = assembled_masks\n\n \n return outputs\n\n @property\n def checkpoint_items(self):\n \"\"\"Returns a dictionary of items to be additionally checkpointed.\"\"\"\n items = dict(backbone=self.backbone)\n items.update(decoder=self.decoder)\n return items\n\n def get_config(self):\n return self._config_dict\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n" ]
[ [ "tensorflow.shape", "tensorflow.reshape", "tensorflow.expand_dims", "tensorflow.matmul", "tensorflow.keras.utils.register_keras_serializable", "tensorflow.transpose" ] ]
derekeverett/stat_model_surrogates
[ "48b140fe89ac1ffa5a7d5733337a5fa519f95d6f" ]
[ "Bayesian_NN_regression/nn_grid_search.py" ]
[ "import pickle\nimport sklearn\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom warnings import filterwarnings\nfilterwarnings('ignore')\n\nimport seaborn as sns\nsns.set()\nfrom pandas.plotting import scatter_matrix\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import scale\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom calculations_load import *\nfrom configurations import *\n\n# Get all the observables list\n\nnobs = 0\nobservables = []\nobs_name = []\n\nfor obs, cent_list in obs_cent_list['Pb-Pb-2760'].items():\n if obs not in active_obs_list['Pb-Pb-2760']:\n continue\n observables.append(obs)\n n = np.array(cent_list).shape[0]\n for i in cent_list:\n obs_name.append(f'{obs}_{i}')\n #self._slices[obs] = slice(self.nobs, self.nobs + n)\n nobs += n\n\nsystem_str = 'Pb-Pb-2760'\ndesign_file = 'production_designs/500pts/design_pts_Pb_Pb_2760_production/design_points_main_PbPb-2760.dat'\ndesign = pd.read_csv(design_file)\ndesign = design.drop(\"idx\", axis=1)\n\n#delete bad design points\ndrop_indices = list(delete_design_pts_set)\ndesign = design.drop(drop_indices)\n\n#choose features (inputs)\n#feature_cols = ['norm', 'trento_p'] #specific choices\nfeature_cols = design.keys().values #all of them\nn_features = len(feature_cols)\n\nX = design[feature_cols]\n\nn_design = SystemsInfo[\"Pb-Pb-2760\"][\"n_design\"]\nnpt = n_design - len(delete_design_pts_set)\nobs = 'dNch_deta' #choose the observable we want to emulate\n\nY = np.array([])\n\nfor pt in range(npt):\n for obs in active_obs_list['Pb-Pb-2760']:\n Y = np.append( Y, trimmed_model_data[system_str][pt, idf][obs]['mean'][:], axis=0)\nY = Y.reshape(X.shape[0], -1)\n\n\nprint( \"X.shape : \"+ str(X.shape) )\nprint( \"Y.shape : \"+ str(Y.shape) )\n\n#Scaling the inputs\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2)\n\n#X_scaler = StandardScaler().fit(X_train)\n#Y_scaler = StandardScaler().fit(Y_train)\nX_scaler = MinMaxScaler(feature_range=(-1, 1)).fit(X_train)\nY_scaler = MinMaxScaler(feature_range=(-1, 1)).fit(Y_train)\n\nX_train_sc = X_scaler.transform(X_train)\nX_test_sc = X_scaler.transform(X_test)\n\nY_train_sc = Y_scaler.transform(Y_train)\nY_test_sc = Y_scaler.transform(Y_test)\n\n\n#Building NN model\n\nfrom keras.models import Model\nfrom keras.layers import Flatten, Input, Dense, Dropout ,Conv1D\ndef model_fn(ly1_units=20,activation_1='sigmoid',activation_2='tanh',ly2_units=20,activation_3='tanh',\\\n dropout_rate1 = 0.1,dropout_rate2 = 0.1,loss_fn=\"huber_loss\", krnl_sz=5,\\\n optimizer='adam'):\n inputs = Input(shape=(X.shape[1],1))\n x = Dense(ly1_units, activation=activation_1)(inputs)\n # print(x.shape)\n x= Conv1D(filters=1,kernel_size=krnl_sz)(x)\n x= Flatten()(x)\n x = Dropout(dropout_rate1)(x, training=True)\n x = Dense(ly2_units, activation=activation_2)(x)\n x = Dropout(dropout_rate2)(x, training=True)\n x = Dense(Y.shape[1], activation=activation_3)(x)\n outputs = x\n model = Model(inputs, outputs)\n#model.compile(loss=\"mean_squared_error\", optimizer='adam')\n model.compile(loss=loss_fn, optimizer=optimizer)\n model.summary()\n return model\n\n\n#initiate models\n\nmodel=model_fn()\n\n#reshape inputs\n\ntrain_tf_X=np.expand_dims(X_train_sc,axis=2)\n\n#Grid search\n\nfrom sklearn.model_selection import 
GridSearchCV\nly_units=[50,100,200,500]\nactivation_1=['sigmoid','tanh']\nactivation_2=['linear','tanh']\ndropout_rate = [0.2, 0.3, 0.5]\nkrnl_sz=[5,10,20,40]\nloss_fn=[\"mse\",\"huber_loss\"]\noptimizer=['adam']\nbatch_size=[10, 20, 50]\nestimator=tf.keras.wrappers.scikit_learn.KerasRegressor(build_fn=model_fn)\nparam_grid = dict(ly1_units=ly_units,ly2_units=ly_units,activation_1=activation_1, activation_2=activation_1,\\\n activation_3=activation_2,dropout_rate1=dropout_rate,dropout_rate2=dropout_rate,\\\n loss_fn=loss_fn,optimizer=optimizer,batch_size=batch_size,krnl_sz=krnl_sz)\ngrid = GridSearchCV(estimator=estimator, param_grid=param_grid, n_jobs=-1, cv=2, scoring='r2',verbose=20)\ngrid_result = grid.fit(train_tf_X, Y_train_sc,epochs=300,verbose=0)\n\nprint(f'The best set of hyperparameters are{grid_result.best_params_}')\n\nfile = open('grid_search_results.pkl', 'wb')\n\npickle.dump(grid_result, file)\n" ]
[ [ "numpy.append", "sklearn.preprocessing.MinMaxScaler", "tensorflow.keras.wrappers.scikit_learn.KerasRegressor", "numpy.expand_dims", "sklearn.model_selection.GridSearchCV", "numpy.array", "sklearn.model_selection.train_test_split" ] ]
themantalope/MONAI
[ "f398298b5aadc076102261a687a158f6ac17ad1c" ]
[ "tests/test_patch_wsi_dataset.py" ]
[ "# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\nfrom unittest import skipUnless\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom parameterized import parameterized\n\nfrom monai.apps.pathology.data import PatchWSIDataset\nfrom monai.apps.utils import download_url\nfrom monai.utils import optional_import\n\n_, has_cim = optional_import(\"cucim\")\n_, has_osl = optional_import(\"openslide\")\n\nFILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\"\nFILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", \"temp_\" + os.path.basename(FILE_URL))\n\nTEST_CASE_0 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [1]},\n ],\n \"region_size\": (1, 1),\n \"grid_shape\": (1, 1),\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_1 = [\n {\n \"data\": [{\"image\": FILE_PATH, \"location\": [10004, 20004], \"label\": [0, 0, 0, 1]}],\n \"region_size\": (8, 8),\n \"grid_shape\": (2, 2),\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_2 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [1]},\n ],\n \"region_size\": 1,\n \"grid_shape\": 1,\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_3 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [[[0, 1], [1, 0]]]},\n ],\n \"region_size\": 1,\n \"grid_shape\": 1,\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[0, 1], [1, 0]]])},\n ],\n]\n\nTEST_CASE_OPENSLIDE_0 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [1]},\n ],\n \"region_size\": (1, 1),\n \"grid_shape\": (1, 1),\n \"patch_size\": 1,\n \"image_reader_name\": \"OpenSlide\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_OPENSLIDE_1 = [\n {\n \"data\": [{\"image\": FILE_PATH, \"location\": [10004, 20004], \"label\": [0, 0, 0, 1]}],\n \"region_size\": (8, 8),\n \"grid_shape\": (2, 2),\n \"patch_size\": 1,\n \"image_reader_name\": \"OpenSlide\",\n },\n [\n {\"image\": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n 
{\"image\": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\n\nclass TestPatchWSIDataset(unittest.TestCase):\n def setUp(self):\n download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\")\n\n @parameterized.expand(\n [\n TEST_CASE_0,\n TEST_CASE_1,\n TEST_CASE_2,\n TEST_CASE_3,\n ]\n )\n @skipUnless(has_cim, \"Requires CuCIM\")\n def test_read_patches_cucim(self, input_parameters, expected):\n dataset = PatchWSIDataset(**input_parameters)\n samples = dataset[0]\n for i in range(len(samples)):\n self.assertTupleEqual(samples[i][\"label\"].shape, expected[i][\"label\"].shape)\n self.assertTupleEqual(samples[i][\"image\"].shape, expected[i][\"image\"].shape)\n self.assertIsNone(assert_array_equal(samples[i][\"label\"], expected[i][\"label\"]))\n self.assertIsNone(assert_array_equal(samples[i][\"image\"], expected[i][\"image\"]))\n\n @parameterized.expand(\n [\n TEST_CASE_OPENSLIDE_0,\n TEST_CASE_OPENSLIDE_1,\n ]\n )\n @skipUnless(has_osl, \"Requires OpenSlide\")\n def test_read_patches_openslide(self, input_parameters, expected):\n dataset = PatchWSIDataset(**input_parameters)\n samples = dataset[0]\n for i in range(len(samples)):\n self.assertTupleEqual(samples[i][\"label\"].shape, expected[i][\"label\"].shape)\n self.assertTupleEqual(samples[i][\"image\"].shape, expected[i][\"image\"].shape)\n self.assertIsNone(assert_array_equal(samples[i][\"label\"], expected[i][\"label\"]))\n self.assertIsNone(assert_array_equal(samples[i][\"image\"], expected[i][\"image\"]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n" ]
[ [ "numpy.array", "numpy.testing.assert_array_equal" ] ]
Mirkazemi/openml-python
[ "4ff66ed284790e4ae29245a15e23a3fa1f1c3a6b" ]
[ "openml/tasks/functions.py" ]
[ "# License: BSD 3-Clause\n\nfrom collections import OrderedDict\nimport io\nimport re\nimport os\nfrom typing import Union, Dict, Optional\n\nimport pandas as pd\nimport xmltodict\n\nfrom ..exceptions import OpenMLCacheException\nfrom ..datasets import get_dataset\nfrom .task import (\n OpenMLClassificationTask,\n OpenMLClusteringTask,\n OpenMLLearningCurveTask,\n TaskType,\n OpenMLRegressionTask,\n OpenMLSupervisedTask,\n OpenMLTask,\n)\nimport openml.utils\nimport openml._api_calls\n\n\nTASKS_CACHE_DIR_NAME = \"tasks\"\n\n\ndef _get_cached_tasks():\n \"\"\"Return a dict of all the tasks which are cached locally.\n Returns\n -------\n tasks : OrderedDict\n A dict of all the cached tasks. Each task is an instance of\n OpenMLTask.\n \"\"\"\n tasks = OrderedDict()\n\n task_cache_dir = openml.utils._create_cache_directory(TASKS_CACHE_DIR_NAME)\n directory_content = os.listdir(task_cache_dir)\n directory_content.sort()\n # Find all dataset ids for which we have downloaded the dataset\n # description\n\n for filename in directory_content:\n if not re.match(r\"[0-9]*\", filename):\n continue\n\n tid = int(filename)\n tasks[tid] = _get_cached_task(tid)\n\n return tasks\n\n\ndef _get_cached_task(tid: int) -> OpenMLTask:\n \"\"\"Return a cached task based on the given id.\n\n Parameters\n ----------\n tid : int\n Id of the task.\n\n Returns\n -------\n OpenMLTask\n \"\"\"\n tid_cache_dir = openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, tid)\n\n try:\n with io.open(os.path.join(tid_cache_dir, \"task.xml\"), encoding=\"utf8\") as fh:\n return _create_task_from_xml(fh.read())\n except (OSError, IOError):\n openml.utils._remove_cache_dir_for_id(TASKS_CACHE_DIR_NAME, tid_cache_dir)\n raise OpenMLCacheException(\"Task file for tid %d not \" \"cached\" % tid)\n\n\ndef _get_estimation_procedure_list():\n \"\"\"Return a list of all estimation procedures which are on OpenML.\n Returns\n -------\n procedures : list\n A list of all estimation procedures. 
Every procedure is represented by\n a dictionary containing the following information: id, task type id,\n name, type, repeats, folds, stratified.\n \"\"\"\n url_suffix = \"estimationprocedure/list\"\n xml_string = openml._api_calls._perform_api_call(url_suffix, \"get\")\n\n procs_dict = xmltodict.parse(xml_string)\n # Minimalistic check if the XML is useful\n if \"oml:estimationprocedures\" not in procs_dict:\n raise ValueError(\"Error in return XML, does not contain tag \" \"oml:estimationprocedures.\")\n elif \"@xmlns:oml\" not in procs_dict[\"oml:estimationprocedures\"]:\n raise ValueError(\n \"Error in return XML, does not contain tag \"\n \"@xmlns:oml as a child of oml:estimationprocedures.\"\n )\n elif procs_dict[\"oml:estimationprocedures\"][\"@xmlns:oml\"] != \"http://openml.org/openml\":\n raise ValueError(\n \"Error in return XML, value of \"\n \"oml:estimationprocedures/@xmlns:oml is not \"\n \"http://openml.org/openml, but %s\"\n % str(procs_dict[\"oml:estimationprocedures\"][\"@xmlns:oml\"])\n )\n\n procs = []\n for proc_ in procs_dict[\"oml:estimationprocedures\"][\"oml:estimationprocedure\"]:\n procs.append(\n {\n \"id\": int(proc_[\"oml:id\"]),\n \"task_type_id\": TaskType(int(proc_[\"oml:ttid\"])),\n \"name\": proc_[\"oml:name\"],\n \"type\": proc_[\"oml:type\"],\n }\n )\n\n return procs\n\n\ndef list_tasks(\n task_type: Optional[TaskType] = None,\n offset: Optional[int] = None,\n size: Optional[int] = None,\n tag: Optional[str] = None,\n output_format: str = \"dict\",\n **kwargs\n) -> Union[Dict, pd.DataFrame]:\n \"\"\"\n Return a number of tasks having the given tag and task_type\n\n Parameters\n ----------\n Filter task_type is separated from the other filters because\n it is used as task_type in the task description, but it is named\n type when used as a filter in list tasks call.\n task_type : TaskType, optional\n ID of the task type as detailed `here <https://www.openml.org/search?type=task_type>`_.\n - Supervised classification: 1\n - Supervised regression: 2\n - Learning curve: 3\n - Supervised data stream classification: 4\n - Clustering: 5\n - Machine Learning Challenge: 6\n - Survival Analysis: 7\n - Subgroup Discovery: 8\n offset : int, optional\n the number of tasks to skip, starting from the first\n size : int, optional\n the maximum number of tasks to show\n tag : str, optional\n the tag to include\n output_format: str, optional (default='dict')\n The parameter decides the format of the output.\n - If 'dict' the output is a dict of dict\n - If 'dataframe' the output is a pandas DataFrame\n kwargs: dict, optional\n Legal filter operators: data_tag, status, data_id, data_name,\n number_instances, number_features,\n number_classes, number_missing_values.\n\n Returns\n -------\n dict\n All tasks having the given task_type and the give tag. Every task is\n represented by a dictionary containing the following information:\n task id, dataset id, task_type and status. If qualities are calculated\n for the associated dataset, some of these are also returned.\n dataframe\n All tasks having the given task_type and the give tag. Every task is\n represented by a row in the data frame containing the following information\n as columns: task id, dataset id, task_type and status. If qualities are\n calculated for the associated dataset, some of these are also returned.\n \"\"\"\n if output_format not in [\"dataframe\", \"dict\"]:\n raise ValueError(\n \"Invalid output format selected. 
\" \"Only 'dict' or 'dataframe' applicable.\"\n )\n return openml.utils._list_all(\n output_format=output_format,\n listing_call=_list_tasks,\n task_type=task_type,\n offset=offset,\n size=size,\n tag=tag,\n **kwargs\n )\n\n\ndef _list_tasks(task_type=None, output_format=\"dict\", **kwargs):\n \"\"\"\n Perform the api call to return a number of tasks having the given filters.\n Parameters\n ----------\n Filter task_type is separated from the other filters because\n it is used as task_type in the task description, but it is named\n type when used as a filter in list tasks call.\n task_type : TaskType, optional\n ID of the task type as detailed\n `here <https://www.openml.org/search?type=task_type>`_.\n - Supervised classification: 1\n - Supervised regression: 2\n - Learning curve: 3\n - Supervised data stream classification: 4\n - Clustering: 5\n - Machine Learning Challenge: 6\n - Survival Analysis: 7\n - Subgroup Discovery: 8\n output_format: str, optional (default='dict')\n The parameter decides the format of the output.\n - If 'dict' the output is a dict of dict\n - If 'dataframe' the output is a pandas DataFrame\n kwargs: dict, optional\n Legal filter operators: tag, task_id (list), data_tag, status, limit,\n offset, data_id, data_name, number_instances, number_features,\n number_classes, number_missing_values.\n\n Returns\n -------\n dict or dataframe\n \"\"\"\n api_call = \"task/list\"\n if task_type is not None:\n api_call += \"/type/%d\" % task_type.value\n if kwargs is not None:\n for operator, value in kwargs.items():\n if operator == \"task_id\":\n value = \",\".join([str(int(i)) for i in value])\n api_call += \"/%s/%s\" % (operator, value)\n return __list_tasks(api_call=api_call, output_format=output_format)\n\n\ndef __list_tasks(api_call, output_format=\"dict\"):\n xml_string = openml._api_calls._perform_api_call(api_call, \"get\")\n tasks_dict = xmltodict.parse(xml_string, force_list=(\"oml:task\", \"oml:input\"))\n # Minimalistic check if the XML is useful\n if \"oml:tasks\" not in tasks_dict:\n raise ValueError('Error in return XML, does not contain \"oml:runs\": %s' % str(tasks_dict))\n elif \"@xmlns:oml\" not in tasks_dict[\"oml:tasks\"]:\n raise ValueError(\n \"Error in return XML, does not contain \" '\"oml:runs\"/@xmlns:oml: %s' % str(tasks_dict)\n )\n elif tasks_dict[\"oml:tasks\"][\"@xmlns:oml\"] != \"http://openml.org/openml\":\n raise ValueError(\n \"Error in return XML, value of \"\n '\"oml:runs\"/@xmlns:oml is not '\n '\"http://openml.org/openml\": %s' % str(tasks_dict)\n )\n\n assert type(tasks_dict[\"oml:tasks\"][\"oml:task\"]) == list, type(tasks_dict[\"oml:tasks\"])\n\n tasks = dict()\n procs = _get_estimation_procedure_list()\n proc_dict = dict((x[\"id\"], x) for x in procs)\n\n for task_ in tasks_dict[\"oml:tasks\"][\"oml:task\"]:\n tid = None\n try:\n tid = int(task_[\"oml:task_id\"])\n task = {\n \"tid\": tid,\n \"ttid\": TaskType(int(task_[\"oml:task_type_id\"])),\n \"did\": int(task_[\"oml:did\"]),\n \"name\": task_[\"oml:name\"],\n \"task_type\": task_[\"oml:task_type\"],\n \"status\": task_[\"oml:status\"],\n }\n\n # Other task inputs\n for input in task_.get(\"oml:input\", list()):\n if input[\"@name\"] == \"estimation_procedure\":\n task[input[\"@name\"]] = proc_dict[int(input[\"#text\"])][\"name\"]\n else:\n value = input.get(\"#text\")\n task[input[\"@name\"]] = value\n\n # The number of qualities can range from 0 to infinity\n for quality in task_.get(\"oml:quality\", list()):\n if \"#text\" not in quality:\n quality_value = 0.0\n else:\n 
quality[\"#text\"] = float(quality[\"#text\"])\n if abs(int(quality[\"#text\"]) - quality[\"#text\"]) < 0.0000001:\n quality[\"#text\"] = int(quality[\"#text\"])\n quality_value = quality[\"#text\"]\n task[quality[\"@name\"]] = quality_value\n tasks[tid] = task\n except KeyError as e:\n if tid is not None:\n raise KeyError(\"Invalid xml for task %d: %s\\nFrom %s\" % (tid, e, task_))\n else:\n raise KeyError(\"Could not find key %s in %s!\" % (e, task_))\n\n if output_format == \"dataframe\":\n tasks = pd.DataFrame.from_dict(tasks, orient=\"index\")\n\n return tasks\n\n\ndef get_tasks(task_ids, download_data=True):\n \"\"\"Download tasks.\n\n This function iterates :meth:`openml.tasks.get_task`.\n\n Parameters\n ----------\n task_ids : iterable\n Integers/Strings representing task ids.\n download_data : bool\n Option to trigger download of data along with the meta data.\n\n Returns\n -------\n list\n \"\"\"\n tasks = []\n for task_id in task_ids:\n tasks.append(get_task(task_id, download_data))\n return tasks\n\n\[email protected]_safe_if_oslo_installed\ndef get_task(task_id: int, download_data: bool = True) -> OpenMLTask:\n \"\"\"Download OpenML task for a given task ID.\n\n Downloads the task representation, while the data splits can be\n downloaded optionally based on the additional parameter. Else,\n splits will either way be downloaded when the task is being used.\n\n Parameters\n ----------\n task_id : int or str\n The OpenML task id.\n download_data : bool\n Option to trigger download of data along with the meta data.\n\n Returns\n -------\n task\n \"\"\"\n try:\n task_id = int(task_id)\n except (ValueError, TypeError):\n raise ValueError(\"Dataset ID is neither an Integer nor can be \" \"cast to an Integer.\")\n\n tid_cache_dir = openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, task_id,)\n\n try:\n task = _get_task_description(task_id)\n dataset = get_dataset(task.dataset_id, download_data)\n # List of class labels availaible in dataset description\n # Including class labels as part of task meta data handles\n # the case where data download was initially disabled\n if isinstance(task, (OpenMLClassificationTask, OpenMLLearningCurveTask)):\n task.class_labels = dataset.retrieve_class_labels(task.target_name)\n # Clustering tasks do not have class labels\n # and do not offer download_split\n if download_data:\n if isinstance(task, OpenMLSupervisedTask):\n task.download_split()\n except Exception as e:\n openml.utils._remove_cache_dir_for_id(\n TASKS_CACHE_DIR_NAME, tid_cache_dir,\n )\n raise e\n\n return task\n\n\ndef _get_task_description(task_id):\n\n try:\n return _get_cached_task(task_id)\n except OpenMLCacheException:\n xml_file = os.path.join(\n openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, task_id,), \"task.xml\",\n )\n task_xml = openml._api_calls._perform_api_call(\"task/%d\" % task_id, \"get\")\n\n with io.open(xml_file, \"w\", encoding=\"utf8\") as fh:\n fh.write(task_xml)\n return _create_task_from_xml(task_xml)\n\n\ndef _create_task_from_xml(xml):\n \"\"\"Create a task given a xml string.\n\n Parameters\n ----------\n xml : string\n Task xml representation.\n\n Returns\n -------\n OpenMLTask\n \"\"\"\n dic = xmltodict.parse(xml)[\"oml:task\"]\n estimation_parameters = dict()\n inputs = dict()\n # Due to the unordered structure we obtain, we first have to extract\n # the possible keys of oml:input; dic[\"oml:input\"] is a list of\n # OrderedDicts\n\n # Check if there is a list of inputs\n if isinstance(dic[\"oml:input\"], list):\n for 
input_ in dic[\"oml:input\"]:\n name = input_[\"@name\"]\n inputs[name] = input_\n # Single input case\n elif isinstance(dic[\"oml:input\"], dict):\n name = dic[\"oml:input\"][\"@name\"]\n inputs[name] = dic[\"oml:input\"]\n\n evaluation_measures = None\n if \"evaluation_measures\" in inputs:\n evaluation_measures = inputs[\"evaluation_measures\"][\"oml:evaluation_measures\"][\n \"oml:evaluation_measure\"\n ]\n\n task_type = TaskType(int(dic[\"oml:task_type_id\"]))\n common_kwargs = {\n \"task_id\": dic[\"oml:task_id\"],\n \"task_type\": dic[\"oml:task_type\"],\n \"task_type_id\": task_type,\n \"data_set_id\": inputs[\"source_data\"][\"oml:data_set\"][\"oml:data_set_id\"],\n \"evaluation_measure\": evaluation_measures,\n }\n if task_type in (\n TaskType.SUPERVISED_CLASSIFICATION,\n TaskType.SUPERVISED_REGRESSION,\n TaskType.LEARNING_CURVE,\n ):\n # Convert some more parameters\n for parameter in inputs[\"estimation_procedure\"][\"oml:estimation_procedure\"][\n \"oml:parameter\"\n ]:\n name = parameter[\"@name\"]\n text = parameter.get(\"#text\", \"\")\n estimation_parameters[name] = text\n\n common_kwargs[\"estimation_procedure_type\"] = inputs[\"estimation_procedure\"][\n \"oml:estimation_procedure\"\n ][\"oml:type\"]\n common_kwargs[\"estimation_parameters\"] = estimation_parameters\n common_kwargs[\"target_name\"] = inputs[\"source_data\"][\"oml:data_set\"][\"oml:target_feature\"]\n common_kwargs[\"data_splits_url\"] = inputs[\"estimation_procedure\"][\n \"oml:estimation_procedure\"\n ][\"oml:data_splits_url\"]\n\n cls = {\n TaskType.SUPERVISED_CLASSIFICATION: OpenMLClassificationTask,\n TaskType.SUPERVISED_REGRESSION: OpenMLRegressionTask,\n TaskType.CLUSTERING: OpenMLClusteringTask,\n TaskType.LEARNING_CURVE: OpenMLLearningCurveTask,\n }.get(task_type)\n if cls is None:\n raise NotImplementedError(\"Task type %s not supported.\" % common_kwargs[\"task_type\"])\n return cls(**common_kwargs)\n\n\ndef create_task(\n task_type: TaskType,\n dataset_id: int,\n estimation_procedure_id: int,\n target_name: Optional[str] = None,\n evaluation_measure: Optional[str] = None,\n **kwargs\n) -> Union[\n OpenMLClassificationTask, OpenMLRegressionTask, OpenMLLearningCurveTask, OpenMLClusteringTask\n]:\n \"\"\"Create a task based on different given attributes.\n\n Builds a task object with the function arguments as\n attributes. 
The type of the task object built is\n determined from the task type id.\n More information on how the arguments (task attributes),\n relate to the different possible tasks can be found in\n the individual task objects at the openml.tasks.task\n module.\n\n Parameters\n ----------\n task_type : TaskType\n Id of the task type.\n dataset_id : int\n The id of the dataset for the task.\n target_name : str, optional\n The name of the feature used as a target.\n At the moment, only optional for the clustering tasks.\n estimation_procedure_id : int\n The id of the estimation procedure.\n evaluation_measure : str, optional\n The name of the evaluation measure.\n kwargs : dict, optional\n Other task attributes that are not mandatory\n for task upload.\n\n Returns\n -------\n OpenMLClassificationTask, OpenMLRegressionTask,\n OpenMLLearningCurveTask, OpenMLClusteringTask\n \"\"\"\n task_cls = {\n TaskType.SUPERVISED_CLASSIFICATION: OpenMLClassificationTask,\n TaskType.SUPERVISED_REGRESSION: OpenMLRegressionTask,\n TaskType.CLUSTERING: OpenMLClusteringTask,\n TaskType.LEARNING_CURVE: OpenMLLearningCurveTask,\n }.get(task_type)\n\n if task_cls is None:\n raise NotImplementedError(\"Task type {0:d} not supported.\".format(task_type))\n else:\n return task_cls(\n task_type_id=task_type,\n task_type=None,\n data_set_id=dataset_id,\n target_name=target_name,\n estimation_procedure_id=estimation_procedure_id,\n evaluation_measure=evaluation_measure,\n **kwargs\n )\n" ]
[ [ "pandas.DataFrame.from_dict" ] ]
jackyoung96/PettingZoo
[ "7ebcd1ddf9124b6857048af930d677974ab201a6" ]
[ "pettingzoo/mpe/scenarios/simple_world_comm.py" ]
[ "import numpy as np\n\nfrom .._mpe_utils.core import Agent, Landmark, World\nfrom .._mpe_utils.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(\n self,\n num_good_agents=2,\n num_adversaries=4,\n num_landmarks=1,\n num_food=2,\n num_forests=2,\n ):\n world = World()\n # set any world properties first\n world.dim_c = 4\n # world.damping = 1\n num_good_agents = num_good_agents\n num_adversaries = num_adversaries\n num_agents = num_adversaries + num_good_agents\n num_landmarks = num_landmarks\n num_food = num_food\n num_forests = num_forests\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.adversary = True if i < num_adversaries else False\n base_index = i - 1 if i < num_adversaries else i - num_adversaries\n base_index = 0 if base_index < 0 else base_index\n base_name = \"adversary\" if agent.adversary else \"agent\"\n base_name = \"leadadversary\" if i == 0 else base_name\n agent.name = f\"{base_name}_{base_index}\"\n agent.collide = True\n agent.leader = True if i == 0 else False\n agent.silent = True if i > 0 else False\n agent.size = 0.075 if agent.adversary else 0.045\n agent.accel = 3.0 if agent.adversary else 4.0\n # agent.accel = 20.0 if agent.adversary else 25.0\n agent.max_speed = 1.0 if agent.adversary else 1.3\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = \"landmark %d\" % i\n landmark.collide = True\n landmark.movable = False\n landmark.size = 0.2\n landmark.boundary = False\n world.food = [Landmark() for i in range(num_food)]\n for i, lm in enumerate(world.food):\n lm.name = \"food %d\" % i\n lm.collide = False\n lm.movable = False\n lm.size = 0.03\n lm.boundary = False\n world.forests = [Landmark() for i in range(num_forests)]\n for i, lm in enumerate(world.forests):\n lm.name = \"forest %d\" % i\n lm.collide = False\n lm.movable = False\n lm.size = 0.3\n lm.boundary = False\n world.landmarks += world.food\n world.landmarks += world.forests\n # world.landmarks += self.set_boundaries(world)\n # world boundaries now penalized with negative reward\n return world\n\n def set_boundaries(self, world):\n boundary_list = []\n landmark_size = 1\n edge = 1 + landmark_size\n num_landmarks = int(edge * 2 / landmark_size)\n for x_pos in [-edge, edge]:\n for i in range(num_landmarks):\n landmark = Landmark()\n landmark.state.p_pos = np.array([x_pos, -1 + i * landmark_size])\n boundary_list.append(landmark)\n\n for y_pos in [-edge, edge]:\n for i in range(num_landmarks):\n landmark = Landmark()\n landmark.state.p_pos = np.array([-1 + i * landmark_size, y_pos])\n boundary_list.append(landmark)\n\n for i, l in enumerate(boundary_list):\n l.name = \"boundary %d\" % i\n l.collide = True\n l.movable = False\n l.boundary = True\n l.color = np.array([0.75, 0.75, 0.75])\n l.size = landmark_size\n l.state.p_vel = np.zeros(world.dim_p)\n\n return boundary_list\n\n def reset_world(self, world, np_random):\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = (\n np.array([0.45, 0.95, 0.45])\n if not agent.adversary\n else np.array([0.95, 0.45, 0.45])\n )\n agent.color -= (\n np.array([0.3, 0.3, 0.3]) if agent.leader else np.array([0, 0, 0])\n )\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.25, 0.25, 0.25])\n for i, landmark in enumerate(world.food):\n landmark.color = np.array([0.15, 0.15, 
0.65])\n for i, landmark in enumerate(world.forests):\n landmark.color = np.array([0.6, 0.9, 0.6])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np_random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np_random.uniform(-0.9, +0.9, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n for i, landmark in enumerate(world.food):\n landmark.state.p_pos = np_random.uniform(-0.9, +0.9, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n for i, landmark in enumerate(world.forests):\n landmark.state.p_pos = np_random.uniform(-0.9, +0.9, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n if agent.adversary:\n collisions = 0\n for a in self.good_agents(world):\n if self.is_collision(a, agent):\n collisions += 1\n return collisions\n else:\n return 0\n\n def is_collision(self, agent1, agent2):\n delta_pos = agent1.state.p_pos - agent2.state.p_pos\n dist = np.sqrt(np.sum(np.square(delta_pos)))\n dist_min = agent1.size + agent2.size\n return True if dist < dist_min else False\n\n # return all agents that are not adversaries\n def good_agents(self, world):\n return [agent for agent in world.agents if not agent.adversary]\n\n # return all adversarial agents\n def adversaries(self, world):\n return [agent for agent in world.agents if agent.adversary]\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n # boundary_reward = -10 if self.outside_boundary(agent) else 0\n main_reward = (\n self.adversary_reward(agent, world)\n if agent.adversary\n else self.agent_reward(agent, world)\n )\n return main_reward\n\n def outside_boundary(self, agent):\n if (\n agent.state.p_pos[0] > 1\n or agent.state.p_pos[0] < -1\n or agent.state.p_pos[1] > 1\n or agent.state.p_pos[1] < -1\n ):\n return True\n else:\n return False\n\n def agent_reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n rew = 0\n shape = False\n adversaries = self.adversaries(world)\n if shape:\n for adv in adversaries:\n rew += 0.1 * np.sqrt(\n np.sum(np.square(agent.state.p_pos - adv.state.p_pos))\n )\n if agent.collide:\n for a in adversaries:\n if self.is_collision(a, agent):\n rew -= 5\n\n def bound(x):\n if x < 0.9:\n return 0\n if x < 1.0:\n return (x - 0.9) * 10\n return min(np.exp(2 * x - 2), 10) # 1 + (x - 1) * (x - 1)\n\n for p in range(world.dim_p):\n x = abs(agent.state.p_pos[p])\n rew -= 2 * bound(x)\n\n for food in world.food:\n if self.is_collision(agent, food):\n rew += 2\n rew -= 0.05 * min(\n np.sqrt(np.sum(np.square(food.state.p_pos - agent.state.p_pos)))\n for food in world.food\n )\n\n return rew\n\n def adversary_reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n rew = 0\n shape = True\n agents = self.good_agents(world)\n adversaries = self.adversaries(world)\n if shape:\n rew -= 0.1 * min(\n np.sqrt(np.sum(np.square(a.state.p_pos - agent.state.p_pos)))\n for a in agents\n )\n if agent.collide:\n for ag in agents:\n for adv in adversaries:\n if self.is_collision(ag, adv):\n rew += 5\n return rew\n\n def observation2(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n if not entity.boundary:\n entity_pos.append(entity.state.p_pos - 
agent.state.p_pos)\n\n food_pos = []\n for entity in world.food:\n if not entity.boundary:\n food_pos.append(entity.state.p_pos - agent.state.p_pos)\n # communication of all other agents\n comm = []\n other_pos = []\n other_vel = []\n for other in world.agents:\n if other is agent:\n continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not other.adversary:\n other_vel.append(other.state.p_vel)\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + other_vel\n )\n\n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n if not entity.boundary:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n\n in_forest = [np.array([-1]) for _ in range(len(world.forests))]\n inf = [False for _ in range(len(world.forests))]\n\n for i in range(len(world.forests)):\n if self.is_collision(agent, world.forests[i]):\n in_forest[i] = np.array([1])\n inf[i] = True\n\n food_pos = []\n for entity in world.food:\n if not entity.boundary:\n food_pos.append(entity.state.p_pos - agent.state.p_pos)\n # communication of all other agents\n comm = []\n other_pos = []\n other_vel = []\n for other in world.agents:\n if other is agent:\n continue\n comm.append(other.state.c)\n\n oth_f = [\n self.is_collision(other, world.forests[i])\n for i in range(len(world.forests))\n ]\n\n # without forest vis\n for i in range(len(world.forests)):\n if inf[i] and oth_f[i]:\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not other.adversary:\n other_vel.append(other.state.p_vel)\n break\n else:\n if ((not any(inf)) and (not any(oth_f))) or agent.leader:\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not other.adversary:\n other_vel.append(other.state.p_vel)\n else:\n other_pos.append([0, 0])\n if not other.adversary:\n other_vel.append([0, 0])\n\n # to tell the pred when the prey are in the forest\n prey_forest = []\n ga = self.good_agents(world)\n for a in ga:\n if any([self.is_collision(a, f) for f in world.forests]):\n prey_forest.append(np.array([1]))\n else:\n prey_forest.append(np.array([-1]))\n # to tell leader when pred are in forest\n prey_forest_lead = []\n for f in world.forests:\n if any([self.is_collision(a, f) for a in ga]):\n prey_forest_lead.append(np.array([1]))\n else:\n prey_forest_lead.append(np.array([-1]))\n\n comm = [world.agents[0].state.c]\n\n if agent.adversary and not agent.leader:\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + other_vel\n + in_forest\n + comm\n )\n if agent.leader:\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + other_vel\n + in_forest\n + comm\n )\n else:\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + in_forest\n + other_vel\n )\n" ]
[ [ "numpy.zeros", "numpy.exp", "numpy.array", "numpy.concatenate", "numpy.square" ] ]
nicohrubec/blackjack_simulator
[ "b934a61f29ceae1bd37238963dfc83888bcdb5bd" ]
[ "src/game_simulation/players.py" ]
[ "from src import configs\nimport pandas as pd\nimport numpy as np\n\n\n# creates player instances from predefined options\ndef player_factory(player_type, capital):\n if player_type == 'basic':\n return BasicPlayer(init_capital=capital)\n elif player_type == 'strategic':\n return StrategicPlayer(init_capital=capital, file_name='thorp_strategy.xlsx')\n elif player_type == 'counter':\n return CountingPlayer(init_capital=capital, file_name='thorp_strategy.xlsx', bet_unit=5)\n else:\n raise ValueError('There is no such player.')\n\n\n# Meta class from which further player types are inherited. To create new player types from this class you\n# will have to implement both the betting strategy and the playing strategy for the player you want to create.\n# Examples for how to create players from this player class are given below.\nclass Player(object):\n def __init__(self, init_capital):\n self.capital = init_capital\n\n def bet(self):\n raise NotImplementedError # for a given game situation how much does the player want to bet ?\n\n def bet_amount(self, amount):\n self.capital -= amount\n\n def play(self, player_cards, dealer_cards):\n raise NotImplementedError # for a given game situation what move does the player pick ?\n\n def add_capital(self, amount):\n self.capital += amount\n\n def get_capital(self):\n return self.capital\n\n def is_counter(self):\n return False # reimplement this to True if the player deploys a card counting strategy\n\n\nclass BasicPlayer(Player):\n\n def bet(self):\n if self.capital > 5:\n self.capital -= 5\n return 5\n else:\n return 0\n\n def play(self, player_cards, dealer_cards):\n player_value = sum(player_cards)\n\n if player_cards[0] == player_cards[1]:\n return 'P'\n elif player_value == 11:\n return 'D'\n elif player_value < 17:\n return 'H'\n else:\n return 'S'\n\n\n# This player deploys a naive betting strategy but uses a given strategy card (example in strategies folder)\n# to guide his moves.\nclass StrategicPlayer(Player):\n\n def __init__(self, init_capital, file_name):\n super().__init__(init_capital)\n strategy_path = configs.strategies_folder / file_name\n self.strategy_card = pd.read_excel(strategy_path, index_col=0, header=1)\n self.strategy_card.columns = [str(col) for col in self.strategy_card.columns] # convert columns to string\n self.strategy_card.index = self.strategy_card.index.map(str) # convert index to string\n\n def bet(self, *args, **kwargs): # naive betting\n if self.capital > 5:\n self.capital -= 5\n return 5\n else:\n return 0\n\n def play(self, player_cards, dealer_cards):\n player_value = sum(player_cards)\n\n if player_value == 21:\n return 'S'\n\n if len(player_cards) == 2: # first move\n if player_cards[0] == player_cards[1]: # split possible\n player_selector = 'D' + str(player_cards[0]) # eg D8 for double 8s\n return self.strategy_card.loc[player_selector, str(dealer_cards)]\n elif 11 in player_cards: # soft hand\n if player_value <= 21:\n player_selector = 'A' + str(player_value - 11)\n else:\n player_selector = str(player_value - 10)\n return self.strategy_card.loc[player_selector, str(dealer_cards)]\n else:\n return self.strategy_card.loc[str(player_value), str(dealer_cards)]\n\n else: # further moves --> only hit or stand allowed\n if 11 in player_cards: # soft hand\n if player_value <= 21:\n player_selector = 'A' + str(player_value - 11)\n else:\n player_selector = str(player_value - 10)\n return self.strategy_card.loc[player_selector, str(dealer_cards)]\n else: # hard hand\n return 
self.strategy_card.loc[str(player_value), str(dealer_cards)] if player_value < 21 else 'S'\n\n\n# This player plays basic strategy like the strategic player but he spreads his bet sizes according\n# to the current count. Count method used here is the HILO system. (+1 for 2-6, 0 for 7-9, -1 for 10 valued cards+ace)\n# Bet size is then computed as true count (running_count / number of remaining decks) - 1 * bet unit\nclass CountingPlayer(StrategicPlayer):\n def __init__(self, init_capital, file_name, bet_unit):\n super().__init__(init_capital, file_name)\n self.bet_unit = bet_unit\n self.running_count = 0\n self.num_seen_cards = 0\n\n def bet(self, num_decks, *args, **kwargs):\n super(CountingPlayer, self).bet(num_decks, *args, **kwargs)\n return max((self.get_true_count(num_decks) - 1) * self.bet_unit, self.bet_unit)\n\n def update_count(self, *args):\n for cards in args:\n for card in cards:\n self.running_count += self.get_card_value(card)\n\n self.num_seen_cards += len(cards)\n\n def reset_count(self):\n self.running_count = 0\n self.num_seen_cards = 0\n\n @staticmethod\n def get_card_value(card):\n if card == 1 or card == 11 or card == 10:\n return -1\n elif card < 7:\n return 1\n else:\n return 0\n\n def get_true_count(self, num_decks):\n num_played_decks = np.round(self.num_seen_cards / 52)\n remaining_decks = num_decks - num_played_decks\n\n return self.running_count / remaining_decks\n\n def is_counter(self):\n return True\n" ]
[ [ "numpy.round", "pandas.read_excel" ] ]
MikeFlanigan/DMU_project
[ "6ae5f658f25f2bd10e20a94f0ccc9a6dd669bac9" ]
[ "pf_test_3d.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Wedge\n\nimport mpl_toolkits.mplot3d as a3\nimport matplotlib.colors as colors\nimport pylab as pl\nimport scipy as sp\nimport math\n\n\nrigx = 0.5\nrigy = 0.5\nrigz = 0.0\ncent = (rigx, rigy, rigz)\n\nradius = 0.5\n\ncam_info = {\"FOV pan\": 0, # degrees\n \"FOV tilt\": 0, # degrees (off of horizon, positive up)\n \"FOV width\": 40, # degrees\n \"FOV height\": 30, # degrees\n \"Zoom\": 0.2, # percent max\n \"Focal min\": 0.2, # unitless for now\n \"Focal max\": 0.6, # unitless for now\n \"FOV min_width\": 5, # degrees FOV at max zoom\n \"FOV max_width\": 90, # degrees FOV at min zoom\n \"FOV min_height\": 4, # degrees FOV at max zoom\n \"FOV max_height\": 90*4/5, # degrees FOV at min zoom\n }\n\n# hard core testing\ncam_info[\"FOV min_width\"] = 120\ncam_info[\"FOV max_width\"] = 120\ncam_info[\"FOV min_height\"] = 120\ncam_info[\"FOV max_height\"] = 120\ncam_info[\"Focal max\"] = 0.8\ncam_info[\"Zoom\"] = 1.0\n\n\n\n# camera control in degrees CCW\nctrl = 10\n\nN_particles = 100\n\ndef cart2pol(x, y):\n \"\"\"Return polar coords from cartesian coords.\n Utility fxn.\n \"\"\"\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return(rho, phi)\n\ndef pol2cart(rho, phi):\n \"\"\"Return cartesian coords from polar coords.\n Utility fxn.\n \"\"\"\n x = rho * np.cos(phi) \n y = rho * np.sin(phi)\n return(x, y)\n\ndef polar2cart(r, theta, phi):\n theta = np.deg2rad(theta)\n phi = np.deg2rad(90-phi)\n x = r * math.sin(phi) * math.cos(theta)\n y = r * math.sin(phi) * math.sin(theta)\n z = r * math.cos(phi)\n return [x, y, z]\n\ndef sphere2cart(r, theta, phi):\n theta = np.deg2rad(theta)\n phi = np.deg2rad(90-phi)\n \n x = r * math.sin(phi) * math.cos(theta)\n y = r * math.sin(phi) * math.sin(theta)\n z = r * math.cos(phi)\n return [x, y, z]\n\ndef get_FOV_width(cam):\n m = (cam[\"FOV max_width\"] - cam[\"FOV min_width\"])\n width = -m*cam[\"Zoom\"] + cam[\"FOV max_width\"]\n return width\n\ndef get_FOV_height(cam):\n m = (cam[\"FOV max_height\"] - cam[\"FOV min_height\"])\n height = -m*cam[\"Zoom\"] + cam[\"FOV max_height\"]\n return height\n\ndef get_focal_dist(cam):\n m = (cam[\"Focal max\"] - cam[\"Focal min\"])\n f_dist = m*cam[\"Zoom\"] + cam[\"Focal min\"]\n return f_dist\n\ndef get_FOV_ends(cent, FOV):\n \"\"\"Take center of FOV and FOV in degrees [0-360]\n and return low and high endpoints in degrees.\n \"\"\"\n l = (cent - FOV/2) % 360\n h = (cent + FOV/2) % 360\n return [l,h]\n\ndef get_verts(r,theta,phi,cam):\n verts = [] # list of 3x3 patches of vertices\n rt = r # approximation for now\n x1, y1, z1 = 0, 0, 0\n x2, y2, z2 = polar2cart(r, theta+cam[\"FOV width\"]/2, phi+cam[\"FOV height\"]/2)\n x3, y3, z3 = polar2cart(r, theta-cam[\"FOV width\"]/2, phi+cam[\"FOV height\"]/2)\n x4, y4, z4 = polar2cart(r, theta+cam[\"FOV width\"]/2, phi-cam[\"FOV height\"]/2)\n x5, y5, z5 = polar2cart(r, theta-cam[\"FOV width\"]/2, phi-cam[\"FOV height\"]/2)\n \n verts.append(np.asarray([[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]])) # top\n verts.append(np.asarray([[x1,y1,z1],[x3,y3,z3],[x5,y5,z5]])) # right \n verts.append(np.asarray([[x1,y1,z1],[x5,y5,z5],[x4,y4,z4]])) # bottom\n verts.append(np.asarray([[x1,y1,z1],[x4,y4,z4],[x2,y2,z2]])) # left \n verts.append(np.asarray([[x2,y2,z2],[x3,y3,z3],[x5,y5,z5],[x4,y4,z4]])) # face\n\n # apply rig translation\n for v in verts:\n v[:,0] += cent[0] # x\n v[:,1] += cent[1] # y\n v[:,2] += cent[2] # z\n return verts\n\ndef in_FOV(p, cam):\n # check if the particle is within the 
focal distance of the camera\n if p[0] <= get_focal_dist(cam):\n # check if the particle is within the pan FOV of the camera\n if ((abs(p[1]-cam[\"FOV pan\"]) % 360) <= cam[\"FOV width\"]/2):\n # check if the particle is within the tilt FOV of the camera\n if ((abs(p[2]-cam[\"FOV tilt\"]) % 360) <= cam[\"FOV height\"]/2):\n return True\n # if not in the FOV then:\n return False\n\nclass visual():\n \"\"\"Class for visualizing this experiment.\"\"\"\n \n def __init__(self):\n # setup plot \n ax = a3.Axes3D(pl.figure())\n\n # set axis planes to be white for visibility\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n \n self.viz = ax\n\n\n def update(self, cam, particles, target, incre = None):\n pl.cla()\n\n self.viz.set_xlim([-0.1,1.1])\n self.viz.set_ylim([-0.1,1.1])\n self.viz.set_zlim([0,1.5])\n\n self.viz.set_xlabel('X')\n self.viz.set_ylabel('Y')\n self.viz.set_zlabel('Z')\n \n # center point (rig)\n self.viz.plot([cent[0]],[cent[1]],[cent[2]],'ro')\n\n for k in range(particles.shape[1]):\n r = particles[0,k]\n th = particles[1,k]\n phi = particles[2,k]\n \n x , y, z = sphere2cart(r, th, phi)\n x += cent[0]\n y += cent[1]\n if in_FOV(particles[:,k], cam):\n self.viz.plot([x],[y],[z],'rx')\n else:\n self.viz.plot([x],[y],[z],'kx')\n \n # create and plot camera FOV patches\n vtxs = get_verts(get_focal_dist(cam), cam[\"FOV pan\"], cam[\"FOV tilt\"], cam)\n for v in vtxs:\n tri = a3.art3d.Poly3DCollection([v])\n tri.set_alpha(0.2)\n tri.set_facecolor('g')\n tri.set_edgecolor('k')\n self.viz.add_collection3d(tri)\n \n if incre != None:\n## plt.savefig('./videos/imgs_01/'+str(incre))\n pl.pause(0.05)\n else: \n pl.pause(0.05)\n\ndef low_variance_resample(b, w):\n num_particles = b.shape[1]\n \n bnew = np.zeros(b.shape)\n r = np.random.rand()\n c = w[0]\n i = 0\n for m in range(num_particles):\n U = r + (m - 1)/num_particles\n while U > c:\n i += 1\n i = i % num_particles\n c += w[i]\n noise = np.asarray([np.random.rand()*radius/50-(radius/50)/2,\n np.random.randint(-1,2),\n np.random.randint(-1,2)])\n bnew[:,m] = b[:,i] + noise\n bnew[0,m] = np.clip(bnew[0,m],0.01*radius,radius) # clipping the particle distances\n## print(m,i)\n return bnew\n\ndef domain_resample(b, percent):\n \"\"\"Reject a small % of particles and replace them from a known distribution.\"\"\"\n cut = int(percent*b.shape[1])\n if cut <= 0: cut = 1\n fresh_radis = np.random.rand(1,cut)*radius\n fresh_thetas = np.random.rand(1,cut)*360\n fresh_phis = np.random.rand(1,cut)*90\n fresh_partics = np.concatenate((fresh_radis,fresh_thetas),0)\n fresh_partics = np.concatenate((fresh_partics,fresh_phis),0)\n # hm, just resampling z% of all particles seems to work well.\n # noteably this cuts down heaviest on particle dense regions.\n # however since in this filtering case, particle dense regions\n # are actually an artifact, this turns out to be good and rational.\n b = np.concatenate((b[:,cut:],fresh_partics),axis = 1)\n return b\n\n\ndef update_belief(b, cam, observation):\n weights = [1/len(b[0])]*len(b[0])\n weights = np.asarray(weights)\n\n b = domain_resample(b,0.01)\n \n i = 0\n for k in range(b.shape[1]):\n # if contained generative model would update here\n # based on states like velocity\n\n # TODO: write actual camera observation likelihoods based on zoom percents\n # assign weight to every particle based on observation\n # TODO: add tilt component to this detection check\n # check pan, check radius, check tilt\n if 
in_FOV(b[:,k], cam):\n if observation:\n weights[i] = 10*weights[i]\n else:\n weights[i] = 0.005*weights[i]\n else:\n pass # no observation on this particle\n\n i += 1 # increment index\n \n # normalize the weights to a valid probability distribution\n weights = weights/weights.sum()\n \n # use the low variance resampling algorithm from the Probabilistic Robotics Book\n b_new = low_variance_resample(b, weights)\n\n # TODO: maybe add a flag condition, if variance ever does get super low, resample\n # uniformly?\n \n return b_new\n \n# initialize belief to a random distribution of particles\nradis = np.random.rand(1,N_particles)*radius\nthetas = np.random.rand(1,N_particles)*360\nphis = np.random.rand(1,N_particles)*90\nbelief = np.concatenate((radis,thetas),0) \nbelief = np.concatenate((belief,phis),0) # belief is now 3d state, r, theta, phi\n\nviz = visual()\nfor i in range(550):\n\n # camera control\n # rand control for testing\n## if i % 2 == 0:\n## cam_info[\"FOV pan\"] = np.random.randint(0,360)\n## cam_info[\"FOV tilt\"] = np.random.randint(0,90)\n## cam_info[\"Zoom\"] = np.random.rand()\n \n cam_info[\"FOV pan\"] += ctrl\n cam_info[\"FOV pan\"] = cam_info[\"FOV pan\"] % 360\n cam_info[\"FOV width\"] = get_FOV_width(cam_info)\n cam_info[\"FOV height\"] = get_FOV_height(cam_info)\n \n # update belief based on current state\n belief = update_belief(belief, cam_info, False)\n\n## cam_info[\"Zoom\"] = 0.2\n\n # display what's happening\n plt.figure(1)\n viz.update(cam_info, belief, 0, incre = i)\n \nplt.close()\n\n\n\n" ]
[ [ "numpy.arctan2", "numpy.zeros", "matplotlib.pyplot.figure", "numpy.asarray", "numpy.cos", "numpy.clip", "numpy.random.rand", "matplotlib.pyplot.close", "numpy.sqrt", "numpy.sin", "numpy.concatenate", "numpy.random.randint", "numpy.deg2rad" ] ]
Euro2xx/generative-compression
[ "489f4fb77620e9b6d8f3bef691d32d50f6bb09be" ]
[ "compress.py" ]
[ "#!/usr/bin/python3\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport time, os, sys\nimport argparse\n\n# User-defined\nfrom network import Network\nfrom utils import Utils\nfrom data import Data\nfrom model import Model\nfrom config import config_test, directories\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\ndef single_compress(config, args):\n start = time.time()\n\n paths = np.array([args.image_path])\n\n gan = Model(config, paths, name='single_compress', evaluate=True)\n saver = tf.train.Saver()\n\n feed_dict_init = {gan.path_placeholder: paths}\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:\n # Initialize variables\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n handle = sess.run(gan.train_iterator.string_handle())\n\n if args.restore_last:\n ckpt = tf.train.get_checkpoint_state(directories.checkpoints)\n assert (ckpt.model_checkpoint_path), 'Missing checkpoint file!'\n\n saver.restore(sess, ckpt.model_checkpoint_path)\n print('Most recent {} restored.'.format(ckpt.model_checkpoint_path))\n else:\n if args.restore_path:\n new_saver = tf.train.import_meta_graph('{}.meta'.format(args.restore_path))\n new_saver.restore(sess, args.restore_path)\n print('Previous checkpoint {} restored.'.format(args.restore_path))\n\n sess.run(gan.train_iterator.initializer, feed_dict=feed_dict_init)\n eval_dict = {gan.training_phase: False, gan.handle: handle}\n\n if args.output_path is None:\n output = os.path.splitext(os.path.basename(args.image_path))\n save_path = os.path.join(directories.samples, '{}_compressed.pdf'.format(output[0]))\n else:\n save_path = args.output_path\n Utils.single_plot(0, 0, sess, gan, handle, save_path, config, single_compress=True)\n print('Reconstruction saved to', save_path)\n\n return\n\n\ndef main(**kwargs):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-rl\", \"--restore-last\", help=\"restore last saved model\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--restore-path\", help=\"path to model to be restored\", type=str)\n parser.add_argument(\"-i\", \"--image-path\", help=\"path to image to compress\", type=str)\n parser.add_argument(\"-sm\", \"--semantic-map-path\", help=\"path to corresponding semantic map\", type=str)\n parser.add_argument(\"-o\", \"--output-path\", help=\"path to output image\", type=str)\n #parser.add_argument(\"-ds\", \"--dataset\", default=\"cityscapes\", help=\"choice of training dataset. Currently only supports cityscapes/ADE20k\", choices=set((\"cityscapes\", \"ADE20k\")), type=str)\n args = parser.parse_args()\n\n # Launch training\n single_compress(config_test, args)\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.global_variables_initializer", "tensorflow.train.get_checkpoint_state", "tensorflow.logging.set_verbosity", "tensorflow.ConfigProto", "tensorflow.train.Saver", "numpy.array", "tensorflow.local_variables_initializer" ] ]
slimgroup/Software.siahkoohi2020EAGEdlb
[ "5d20f6891ca8a17fa1695ed629c3a589ab8416bd" ]
[ "src/sample.py" ]
[ "import torch\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport os\nimport h5py\nfrom load_vel import overthrust_model\nfrom generator import generator\nfrom tqdm import tqdm\nfrom scipy.interpolate import interp1d\nimport matplotlib.ticker as ticker\nsfmt=ticker.ScalarFormatter(useMathText=True) \nsfmt.set_powerlimits((0, 0))\nimport matplotlib\n\nclass Sample(object):\n def __init__(self, args):\n\n if torch.cuda.is_available() and args.cuda:\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n self.device = torch.device('cuda')\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n print(' [*] GPU is available')\n else:\n self.device = torch.device('cpu')\n torch.set_default_tensor_type('torch.FloatTensor')\n self.build_model(args)\n \n def build_model(self, args):\n\n m0, m, self.dm, spacing, shape, origin = overthrust_model(args.vel_dir)\n self.extent = np.array([0., self.dm.shape[2]*spacing[0], \n self.dm.shape[3]*spacing[1], 0.])/1.0e3\n self.dm = self.dm.to(self.device) \n self.load(args, os.path.join(args.checkpoint_dir, args.experiment))\n self.burn_in_index = 52\n\n def load(self, args, checkpoint_dir):\n\n log_to_load = os.path.join(checkpoint_dir, 'training-logs.pt')\n assert os.path.isfile(log_to_load)\n\n if args.cuda == 0:\n training_logs = torch.load(log_to_load, map_location='cpu')\n else:\n training_logs = torch.load(log_to_load)\n print(' [*] Samples loaded')\n self.net_loss_log = training_logs['net_loss_log']\n self.model_loss_log = training_logs['model_loss_log']\n self.samples = training_logs['samples']\n assert len(self.samples) > self.burn_in_index\n \n def test(self, args):\n\n \n fig = plt.figure(\"profile\", dpi=200, figsize=(7, 2.5))\n plt.imshow(self.dm[0, 0, :, :].t().cpu().numpy(), vmin=-3.0/100.0, vmax=3.0/100.0, \n aspect=1, extent=self.extent, cmap=\"seismic\", alpha=0.6, interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(\"True model - \" + r\"$\\delta \\mathbf{m}$\");\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"dm.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n self.samples = np.array(self.samples)\n self.samples = np.transpose(self.samples.reshape((-1, self.dm.shape[2], self.dm.shape[3])), \n (0, 2, 1))\n for j in range(self.samples.shape[0]):\n self.samples[j, :, :] = self.model_topMute(self.samples[j, :, :])\n\n samples_mean = np.mean(self.samples[self.burn_in_index:, :, :], axis=0)\n samples_std = np.std(self.samples[self.burn_in_index:, :, :], axis=0)\n\n if not os.path.exists(os.path.join(args.sample_dir, args.experiment, \"Gzs\")):\n os.makedirs(os.path.join(args.sample_dir, args.experiment, \"Gzs\"))\n\n idxs = np.random.choice(self.samples[self.burn_in_index:, :, :].shape[0], 5, replace=False)\n for i in idxs:\n fig = plt.figure(\"G(z_0)\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.samples[self.burn_in_index + i], vmin=-3.0/100.0, vmax=3.0/100.0, aspect=1, \\\n extent=self.extent, cmap=\"seismic\", alpha=0.6, interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}$\" + r\"$_{{{}}})$\".format(i));\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Gzs\", \"Gz\" + str(i) + \".png\"), format=\"png\", 
bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n fig = plt.figure(\"G(z_0) - G(z_i)\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.samples[self.burn_in_index + i] - self.samples[self.burn_in_index], \\\n vmin=-2e-2, vmax=2e-2, aspect=1, extent=self.extent, cmap=\"twilight_shifted\", interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}$\" + r\"$_{{{}}}) - $\".format(i) + \n r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}_{{0}})$\"); \n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Gzs\", \"Gz_\" + str(i) + \"-Gz0.png\"), format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n fig = plt.figure(\"G(z_0) -mean\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.samples[self.burn_in_index + i] - samples_mean, \\\n vmin=-2e-2, vmax=2e-2, aspect=1, extent=self.extent, cmap=\"twilight_shifted\", interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}$\" + r\"$_{{{}}}) - $\".format(i) + \n r\"$\\delta \\widehat { \\mathbf{m}}$\"); \n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Gzs\", \"Gz_\" + str(i) + \"-mean.png\"), format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n fig = plt.figure(\"mean of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_mean, vmin=-3.0/100.0, vmax=3.0/100.0, aspect=1, extent=self.extent, cmap=\"seismic\", \n alpha=0.6, interpolation=\"kaiser\")\n plt.title(r\"$\\delta \\widehat { \\mathbf{m}} $\" + \" - mean of \" + \n r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}_j)$\" + \"'s\" + \n r\"$, \\ \\widehat{{\\mathbf{w}}}_j \\sim p_{post} ( \\mathbf{w} | \\left \\{ \\mathbf{d}_{i}, \\mathbf{q}_{i} \\right \\}_{i=1}^N, \\mathbf{z} )$\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Gz-mean.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n \n x_loc = [334, 64]\n y_loc = [65, 79]\n fig = plt.figure(\"std of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_std, vmin=0., vmax=9e-3, aspect=1, extent=self.extent, cmap=\"OrRd\", \n interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.plot(x_loc[0]*0.025, y_loc[0]*0.025, marker=\"o\", ms=10, alpha=0.9, c=\"#00b4ba\", \n markerfacecolor=\"None\", markeredgewidth=1.2)\n plt.plot(x_loc[1]*0.025, y_loc[1]*0.025, marker=\"o\", ms=10, alpha=0.9, c=\"#00b4ba\",\n markerfacecolor=\"None\", markeredgewidth=1.2)\n plt.plot(x_loc[0]*0.025, y_loc[0]*0.025, marker=\"o\", ms=10, alpha=0.2, c=\"None\",\n markerfacecolor=\"#00b4ba\", markeredgewidth=.01)\n plt.plot(x_loc[1]*0.025, y_loc[1]*0.025, marker=\"o\", ms=10, alpha=0.2, c=\"None\",\n markerfacecolor=\"#00b4ba\", markeredgewidth=.01)\n plt.title(\"Point-wise standard deviation of \" + r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}_j)$\" + \"'s\")\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Gz-std.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n norm_fac = 0.0098\n fig = plt.figure(\"profile\", dpi=200, figsize=(7, 2.5))\n 
plt.imshow(self.dm[0, 0, :, :].t().cpu().numpy(), vmin=-3.0/100.0, vmax=3.0/100.0, \n aspect=1, extent=self.extent, cmap=\"seismic\", alpha=0.3, interpolation=\"kaiser\")\n horiz_loz = [50, 150, 250, 350]\n for loc in horiz_loz:\n plt.plot(samples_std[:, loc]/norm_fac + loc*.025, \n np.linspace(0., 3.025, samples_std.shape[0]), \n color=\"#0a9c00\", lw=1.4, alpha=0.7);\n plt.plot(np.zeros(self.dm.shape[3]) + loc*.025, \n np.linspace(0., 3.025, samples_std.shape[0]), color=\"k\", lw=1.4, alpha=0.5);\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt);\n plt.title(\"Point-wise standard deviation vertical profiles\");\n plt.xlabel(\"Horizontal distance (km)\"); plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"overlaid-std.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n def sample_prior(self, args):\n\n samples_to_load = os.path.join('./checkpoint', 'prior_samples.pt')\n if os.path.isfile(samples_to_load):\n self.prior_samples = torch.load(samples_to_load)['prior_samples']\n print(' [*] Prior samples loaded')\n else:\n print(' [*] Computing samples from the prior')\n self.prior_samples = []\n self.z = torch.randn((1, 3, 512, 128), device=self.device, requires_grad=False)\n for j in tqdm(range(5000)):\n self.G = generator(\n self.dm.size(),\n num_input_channels=3, num_output_channels=1, \n num_channels_down = [16, 32, 256],\n num_channels_up = [16, 32, 256],\n num_channels_skip = [0, 0, 0],\n upsample_mode = 'bicubic',\n need1x1_up = True,\n filter_size_down=5, \n filter_size_up=3,\n filter_skip_size = 1,\n need_sigmoid=False, \n need_bias=True, \n pad='reflection', \n act_fun='LeakyReLU').to(self.device)\n self.prior_samples.append(self.G(self.z).detach().cpu().numpy())\n\n torch.save({'prior_samples': self.prior_samples}, os.path.join('./checkpoint',\n 'prior_samples.pt'))\n print(' [*] Prior samples saved')\n\n self.prior_samples = np.array(self.prior_samples)\n self.prior_samples = np.transpose(self.prior_samples.reshape((-1, self.dm.shape[2], self.dm.shape[3])), \n (0, 2, 1))\n\n samples_mean = np.mean(self.prior_samples, axis=0)\n samples_std = np.std(self.prior_samples, axis=0)\n\n if not os.path.exists(os.path.join(args.sample_dir, args.experiment, \"Prior\")):\n os.makedirs(os.path.join(args.sample_dir, args.experiment, \"Prior\"))\n\n idxs = np.random.choice(1000, 5, replace=False)\n for i in idxs:\n fig = plt.figure(\"G(z_0)\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.prior_samples[i], vmin=-20.0/100.0, vmax=20.0/100.0, aspect=1, \\\n extent=self.extent, cmap=\"seismic\", alpha=0.6, interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\mathbf{w}_0)$\" \n + r\"$, \\ \\mathbf{w}_0 \\sim p_{prior} ( \\mathbf{w} )$\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Prior\", \"Gz\" + str(i) + \".png\"), format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n fig = plt.figure(\"mean of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_mean, vmin=np.min(samples_mean), vmax=-np.min(samples_mean), \n aspect=1, extent=self.extent, cmap=\"seismic\", \n alpha=0.6, interpolation=\"kaiser\")\n plt.title(\"Mean of \" + r\"$\\mathbf{g}(\\mathbf{z},\\mathbf{w}_i)$\" + \"'s\" + \n r\"$, \\ \\mathbf{w}_i \\sim p_{prior} ( \\mathbf{w} )$\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance 
(km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Prior-mean.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n fig = plt.figure(\"std of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_std, vmin=np.min(samples_std), vmax=np.max(samples_std), \n aspect=1, extent=self.extent, cmap=\"OrRd\", \n interpolation=\"kaiser\")\n plt.title(\"Point-wise standard deviation of \" + r\"$\\mathbf{g}(\\mathbf{z},\\mathbf{w}_i)$\" + \"'s\" )\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Prior-std.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n assert len(self.samples) > 0\n self.samples = np.array(self.samples)\n self.samples = np.transpose(self.samples.reshape((-1, self.dm.shape[2], self.dm.shape[3])), \n (0, 2, 1))\n for j in range(self.samples.shape[0]):\n self.samples[j, :, :] = self.model_topMute(self.samples[j, :, :])\n self.samples = self.samples[self.burn_in_index:, :, :]\n x_loc = [334, 64]\n y_loc = [65, 79]\n for ix, iy in zip(x_loc, y_loc):\n hist_init = []\n hist_trained = []\n for i in range(self.prior_samples.shape[0]):\n hist_init.append(self.prior_samples[i, iy, ix])\n for i in range(self.samples.shape[0]):\n hist_trained.append(self.samples[i, iy, ix])\n fig = plt.figure(\"hist\", dpi=100, figsize=(7, 2))\n n, bins, _ = plt.hist(np.array(hist_init), bins=np.linspace(-0.10, 0.10, 100), \n density=False, label=\"prior\", color=\"#ff8800\", alpha=0.5, histtype='bar')\n plt.hist(np.array(hist_trained), 12, density=True, label=\"posterior\", \n color=\"#00b4ba\", alpha=0.8, histtype='bar')\n plt.title(\"Point-wise histogram at (\" + \"{0:.2f}\".format(ix*.025) + \n \" km, \" + \"{0:.2f}\".format(iy*.025) + \" km)\");\n # plt.vlines(self.dm[0, 0, ix, iy], 0, 200, lw=0.8, label=r\"$\\delta \\mathbf{m}$\")\n plt.xlabel(\"Perturbation\");\n plt.legend()\n plt.grid()\n plt.xlim([-0.10, 0.10])\n plt.ylim([0, 125])\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"histogram-at-\" + \n \"{}\".format(ix) + \"x\" + \"{}\".format(iy) + \".png\"), format=\"png\", \n bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n # plt.stem(bins[:-1],n/10)\n\n\n def model_topMute(self, image, mute_end=20, length=1):\n\n mute_start = mute_end - length\n damp = np.zeros([image.shape[0]])\n damp[0:mute_start-1] = 0.\n damp[mute_end:] = 1.\n taper_length = mute_end - mute_start + 1\n taper = (1. + np.sin((np.pi/2.0*np.array(range(0,taper_length-1)))/(taper_length - 1)))/2.\n damp[mute_start:mute_end] = taper\n for j in range(0, image.shape[1]):\n image[:,j] = image[:,j]*damp\n return image" ]
[ [ "torch.set_default_tensor_type", "matplotlib.pyplot.imshow", "torch.cuda.is_available", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "torch.randn", "matplotlib.pyplot.figure", "numpy.random.choice", "matplotlib.pyplot.xlim", "matplotlib.pyplot.title", "torch.device", "numpy.linspace", "numpy.mean", "torch.load", "numpy.zeros", "matplotlib.ticker.ScalarFormatter", "numpy.max", "numpy.min", "matplotlib.pyplot.close", "matplotlib.pyplot.ylim", "numpy.std", "matplotlib.pyplot.colorbar", "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
shizizhou/ROAR
[ "f143605d84f30f071e24e8224014c0358e260c42" ]
[ "ROAR/agent_module/legacy_agents/point_cloud_map_recording_agent.py" ]
[ "from ROAR.agent_module.agent import Agent\r\nfrom ROAR.utilities_module.data_structures_models import SensorsData\r\nfrom ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl\r\nfrom ROAR.perception_module.legacy.ground_plane_point_cloud_detector import GroundPlanePointCloudDetector\r\nfrom ROAR.visualization_module.visualizer import Visualizer\r\nimport numpy as np\r\nimport cv2\r\nfrom pathlib import Path\r\nfrom ROAR.planning_module.mission_planner.waypoint_following_mission_planner import WaypointFollowingMissionPlanner\r\nfrom ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner\r\nfrom ROAR.planning_module.local_planner.simple_waypoint_following_local_planner import \\\r\n SimpleWaypointFollowingLocalPlanner\r\n\r\nfrom typing import List\r\nfrom ROAR.control_module.pid_controller import PIDParam\r\nfrom ROAR.control_module.pid_controller import VehiclePIDController\r\nfrom ROAR.utilities_module.data_structures_models import MapEntry\r\n\r\n\r\nclass PointCloudMapRecordingAgent(Agent):\r\n def __init__(self, **kwargs):\r\n super(PointCloudMapRecordingAgent, self).__init__(**kwargs)\r\n self.logger.debug(\"GPD2 Agent Initialized\")\r\n self.route_file_path = Path(self.agent_settings.waypoint_file_path)\r\n self.mission_planner = WaypointFollowingMissionPlanner(agent=self)\r\n # initiated right after mission plan\r\n self.controller = \\\r\n self.pid_controller = VehiclePIDController(agent=self,\r\n args_lateral=PIDParam.default_lateral_param(),\r\n args_longitudinal=PIDParam.default_longitudinal_param(),\r\n target_speed=20)\r\n self.behavior_planner = BehaviorPlanner(agent=self)\r\n self.local_planner = SimpleWaypointFollowingLocalPlanner(\r\n agent=self,\r\n controller=self.controller,\r\n mission_planner=self.mission_planner,\r\n behavior_planner=self.behavior_planner,\r\n closeness_threshold=1)\r\n self.ground_plane_point_cloud_detector = GroundPlanePointCloudDetector(agent=self, max_points_to_convert=20000,\r\n ground_tilt_threshhold=0.05)\r\n self.visualizer = Visualizer(agent=self)\r\n self.map_history: List[MapEntry] = []\r\n self.file_written = False\r\n\r\n def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:\r\n super(PointCloudMapRecordingAgent, self).run_step(sensors_data=sensors_data, vehicle=vehicle)\r\n control = self.local_planner.run_in_series()\r\n try:\r\n ground_points = self.ground_plane_point_cloud_detector.run_in_series()\r\n\r\n # print(np.shape(ground_points))\r\n color_image = self.front_rgb_camera.data.copy()\r\n ground_cords_in_2d: np.ndarray = self.visualizer.world_to_img_transform(xyz=ground_points)[:, :2]\r\n # this is a hack, without 5000 threshold, it sometimes have false detection\r\n # if np.shape(ground_cords_in_2d)[0] > 4000:\r\n # estimate left = (x_min, img_pos[1]) and right = (x_max, img_pos[1])\r\n img_positions = self.visualizer.world_to_img_transform(\r\n np.array([self.local_planner.way_points_queue[1].location.to_array()]))\r\n img_pos = img_positions[0]\r\n y_range = img_pos[1] - 5, img_pos[1] + 5\r\n indices = np.where(\r\n np.logical_and(ground_cords_in_2d[:, 1] >= y_range[0], ground_cords_in_2d[:, 1] <= y_range[1]))\r\n bar_cords = ground_cords_in_2d[indices]\r\n x_min, y_min = np.amin(bar_cords, axis=0)\r\n x_max, y_max = np.amax(bar_cords, axis=0)\r\n left_img_cord, right_img_cord = (x_min, img_pos[1]), (x_max, img_pos[1])\r\n pts = self.img_cords_to_world_cords(left_img_cord, right_img_cord)\r\n\r\n # save it\r\n 
self.map_history.append(MapEntry(point_a=pts[0].tolist(), point_b=pts[1].tolist()))\r\n\r\n # visualize\r\n color_image[ground_cords_in_2d[:, 1], ground_cords_in_2d[:, 0]] = [255, 255, 255]\r\n for y, x, _ in img_positions:\r\n color_image[x - 2: x + 2, y - 2:y + 2] = self.visualizer.GREEN\r\n image = cv2.line(color_image, left_img_cord, right_img_cord, (0, 255, 0), 5)\r\n cv2.imshow(\"color\", image)\r\n cv2.waitKey(1)\r\n except Exception as e:\r\n self.logger.error(e)\r\n\r\n # write it to file\r\n if self.local_planner.is_done() and self.file_written is False:\r\n self.logger.debug(\"WRITING TO FILE\")\r\n output_file_path: Path = Path(\r\n self.agent_settings.output_data_folder_path) / \"easy_map_waypoints_pointcloud_v3.json\"\r\n f = output_file_path.open('w')\r\n import json\r\n json.dump(fp=f, obj=[map_entry.dict() for map_entry in self.map_history], indent=2)\r\n f.close()\r\n self.file_written = True\r\n return control\r\n\r\n def img_cords_to_world_cords(self, left_img_cord, right_img_cord):\r\n \"\"\"\r\n Converts depth data from the Front Depth Camera to World coordinates.\r\n\r\n Args:\r\n left_img_cord ():\r\n right_img_cord ():\r\n\r\n Returns:\r\n points: World coordinates in map\r\n \"\"\"\r\n depth = self.front_depth_camera.data\r\n # depth_center = depth[img_pos_center[1]][img_pos_center[0]] * 1000\r\n depth_left = depth[left_img_cord[1]][left_img_cord[0]] * 1000\r\n depth_right = depth[right_img_cord[1]][right_img_cord[0]] * 1000\r\n\r\n # reconstruct p2d and transform it back to world space\r\n raw_p2d = np.array([\r\n [left_img_cord[0] * depth_left, left_img_cord[1] * depth_left, depth_left],\r\n # [right_img_cord[0] * depth_center, right_img_cord[1] * depth_center, depth_center],\r\n [right_img_cord[0] * depth_right, right_img_cord[1] * depth_right, depth_right]\r\n ])\r\n cords_y_minus_z_x = np.linalg.inv(self.front_depth_camera.intrinsics_matrix) @ raw_p2d.T\r\n cords_xyz_1 = np.vstack([\r\n cords_y_minus_z_x[2, :],\r\n cords_y_minus_z_x[0, :],\r\n -cords_y_minus_z_x[1, :],\r\n np.ones((1, np.shape(cords_y_minus_z_x)[1]))\r\n ])\r\n points: np.ndarray = self.vehicle.transform.get_matrix() @ self.front_depth_camera.transform.get_matrix() @ cords_xyz_1\r\n points = points.T[:, :3]\r\n return points\r\n\r\n @staticmethod\r\n def _pix2xyz(depth_img, i, j):\r\n return [\r\n depth_img[i, j] * j * 1000,\r\n depth_img[i, j] * i * 1000,\r\n depth_img[i, j] * 1000\r\n ]\r\n" ]
[ [ "numpy.linalg.inv", "numpy.logical_and", "numpy.amax", "numpy.amin", "numpy.shape", "numpy.array" ] ]
rhong3/Neutrophil
[ "97efb7cc01dc7b1bb06e29a824352d493bb4add5" ]
[ "scripts/Legacy/deprecated/cnnva.py" ]
[ "#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 7 12:02:45 2017\n\n@authors: lwk, RH\n\n\"\"\"\n\nfrom datetime import datetime\nimport os\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nimport vgg\n\nslim = tf.contrib.slim\n\n\nclass INCEPTION():\n \"\"\"\n Use the InceptionV3 architecture\n\n \"\"\"\n\n DEFAULTS = {\n \"batch_size\": 128,\n \"dropout\": 0.8,\n \"learning_rate\": 1E-3\n }\n\n RESTORE_KEY = \"cnn_to_restore\"\n\n def __init__(self, input_dim, d_hyperparams={},\n save_graph_def=True, meta_graph=None,\n log_dir=\"./log\"):\n\n self.input_dim = input_dim\n self.__dict__.update(INCEPTION.DEFAULTS, **d_hyperparams)\n self.sesh = tf.Session()\n\n if meta_graph: # load saved graph\n model_name = os.path.basename(meta_graph)\n meta_graph = os.path.abspath(meta_graph)\n tf.train.import_meta_graph(log_dir + '/' + model_name +'.meta').restore(\n self.sesh, log_dir + '/' + model_name)\n handles = self.sesh.graph.get_collection(INCEPTION.RESTORE_KEY)\n\n\n else: # build graph from scratch\n self.datetime = datetime.now().strftime(r\"%y%m%d_%H%M\")\n handles = self._buildGraph()\n for handle in handles:\n tf.add_to_collection(INCEPTION.RESTORE_KEY, handle)\n self.sesh.run(tf.global_variables_initializer())\n\n # unpack handles for tensor ops to feed or fetch for lower layers\n (self.x_in, self.dropout_, self.is_train,\n self.y_in, self.logits, self.net, self.w, self.pred, self.pred_cost,\n self.global_step, self.train_op, self.merged_summary) = handles\n\n # print(self.batch_size,flush=True)\n # print(self.learning_rate,flush=True)\n\n if save_graph_def: # tensorboard\n try:\n os.mkdir(log_dir + '/training')\n os.mkdir(log_dir + '/validation')\n\n except(FileExistsError):\n pass\n\n self.train_logger = tf.summary.FileWriter(log_dir + '/training', self.sesh.graph)\n self.valid_logger = tf.summary.FileWriter(log_dir + '/validation', self.sesh.graph)\n\n @property\n def step(self):\n return self.global_step.eval(session=self.sesh)\n\n def _buildGraph(self):\n x_in = tf.placeholder(tf.float32, shape=[None, # enables variable batch size\n self.input_dim[0]], name=\"x\")\n x_in_reshape = tf.reshape(x_in, [-1, self.input_dim[1], self.input_dim[2], 3])\n\n dropout = tf.placeholder_with_default(1., shape=[], name=\"dropout\")\n\n y_in = tf.placeholder(dtype=tf.int8, name=\"y\")\n\n onehot_labels = tf.one_hot(indices=tf.cast(y_in, tf.int32), depth=2)\n\n is_train = tf.placeholder_with_default(True, shape=[], name=\"is_train\")\n\n logits, nett, ww = vgg.vgg_a(x_in_reshape,\n num_classes=2,\n is_training=is_train,\n dropout_keep_prob=dropout,\n spatial_squeeze=True,\n scope='vgga')\n\n pred = tf.nn.softmax(logits, name=\"prediction\")\n\n global_step = tf.Variable(0, trainable=False)\n\n pred_cost = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n\n tf.summary.scalar(\"InceptionV3_cost\", pred_cost)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss=pred_cost,\n learning_rate=self.learning_rate,\n global_step=global_step,\n optimizer=\"Adam\")\n\n merged_summary = tf.summary.merge_all()\n\n return (x_in, dropout, is_train,\n y_in, logits, nett, ww, pred, pred_cost,\n global_step, train_op, merged_summary)\n\n def inference(self, x, train_status=False):\n feed_dict = {self.x_in: x, self.is_train: train_status}\n fetches = [self.pred, self.net, self.w]\n return self.sesh.run(fetches, feed_dict=feed_dict)\n\n\n def get_global_step(self, X):\n x, y = X.train.next_batch(self.batch_size)\n\n feed_dict = {self.x_in: x, 
self.y_in: y,\n self.dropout_: self.dropout}\n\n fetches = [self.global_step]\n\n # Benchmark the learning\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n\n i = self.sesh.run(fetches, feed_dict)\n\n return i\n\n\n def train(self, X, max_iter=np.inf, max_epochs=np.inf, cross_validate=True,\n verbose=True, save=True, outdir=\"./out\"):\n\n if save:\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)\n\n try:\n err_train = 0\n now = datetime.now().isoformat()[11:]\n print(\"------- Training begin: {} -------\\n\".format(now), flush=True)\n\n while True:\n x, y = X.train.next_batch(self.batch_size)\n\n feed_dict = {self.x_in: x, self.y_in: y,\n self.dropout_: self.dropout}\n\n fetches = [self.merged_summary, self.logits, self.pred,\n self.pred_cost, self.global_step, self.train_op]\n\n # Benchmark the learning\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n\n summary, logits, pred, cost, i, _ = self.sesh.run(fetches, feed_dict\n # options=run_options,\n # run_metadata=run_metadata\n )\n\n self.train_logger.add_summary(summary, i)\n\n # get runtime statistics every 1000 runs\n # if i%1000 == 0:\n # self.logger.add_run_metadata(run_metadata, 'step%d' % i)\n err_train += cost\n\n if i % 1000 == 0 and verbose:\n # print(\"round {} --> avg cost: \".format(i), err_train / i, flush=True)\n print(\"round {} --> cost: \".format(i), cost, flush=True)\n\n elif i == max_iter and verbose:\n print(\"round {} --> cost: \".format(i), cost, flush=True)\n\n\n if i % 1000 == 0 and verbose: # and i >= 10000:\n\n if cross_validate:\n x, y = X.validation.next_batch(self.batch_size)\n feed_dict = {self.x_in: x, self.y_in: y}\n fetches = [self.pred_cost, self.merged_summary]\n valid_cost, valid_summary = self.sesh.run(fetches, feed_dict)\n\n self.valid_logger.add_summary(valid_summary, i)\n\n print(\"round {} --> CV cost: \".format(i), valid_cost, flush=True)\n print(valid_summary)\n\n elif i == max_iter and verbose: # and i >= 10000:\n\n if cross_validate:\n x, y = X.validation.next_batch(self.batch_size)\n feed_dict = {self.x_in: x, self.y_in: y}\n fetches = [self.pred_cost, self.merged_summary]\n valid_cost, valid_summary = self.sesh.run(fetches, feed_dict)\n\n self.valid_logger.add_summary(valid_summary, i)\n\n print(\"round {} --> CV cost: \".format(i), valid_cost, flush=True)\n print(valid_summary)\n\n\n \"\"\" \n if i%50000 == 0 and save:\n interfile=os.path.join(os.path.abspath(outdir), \"{}_cnn_{}\".format(\n self.datetime, \"_\".join(map(str, self.input_dim))))\n saver.save(self.sesh, interfile, global_step=self.step)\n \"\"\"\n\n if i >= max_iter or X.train.epochs_completed >= max_epochs:\n print(\"final avg cost (@ step {} = epoch {}): {}\".format(\n i, X.train.epochs_completed, err_train / i), flush=True)\n\n now = datetime.now().isoformat()[11:]\n print(\"------- Training end: {} -------\\n\".format(now), flush=True)\n\n if save:\n outfile = os.path.join(os.path.abspath(outdir), \"inception3_{}\".format(\"_\".join(['dropout', str(self.dropout)])))\n saver.save(self.sesh, outfile, global_step=None)\n try:\n self.train_logger.flush()\n self.train_logger.close()\n self.valid_logger.flush()\n self.valid_logger.close()\n\n except(AttributeError): # not logging\n continue\n break\n\n except(KeyboardInterrupt):\n print(\"final avg cost (@ step {} = epoch {}): {}\".format(\n i, X.train.epochs_completed, err_train / i), flush=True)\n\n now = datetime.now().isoformat()[11:]\n 
print(\"------- Training end: {} -------\\n\".format(now), flush=True)\n\n if save:\n outfile = os.path.join(os.path.abspath(outdir), \"inception3_{}\".format(\"_\".join(['dropout', str(self.dropout)])))\n saver.save(self.sesh, outfile, global_step=None)\n try:\n self.train_logger.flush()\n self.train_logger.close()\n self.valid_logger.flush()\n self.valid_logger.close()\n\n\n\n except(AttributeError): # not logging\n print('Not logging', flush=True)\n\n sys.exit(0)\n\n\n" ]
[ [ "tensorflow.placeholder_with_default", "tensorflow.placeholder", "tensorflow.summary.scalar", "tensorflow.reshape", "tensorflow.summary.merge_all", "tensorflow.global_variables_initializer", "tensorflow.global_variables", "tensorflow.cast", "tensorflow.contrib.layers.optimize_loss", "tensorflow.Session", "tensorflow.Variable", "tensorflow.losses.softmax_cross_entropy", "tensorflow.summary.FileWriter", "tensorflow.nn.softmax", "tensorflow.add_to_collection", "tensorflow.train.import_meta_graph" ] ]
glamod/glamod-nuim
[ "eed6f9d7d71b0c456ef39fdea6b58677e13ab50c" ]
[ "source_convert_IFF_code/321/DWD_oseas_data_extract_ws_pacific.py" ]
[ "\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 19 09:02:55 2019\r\n\r\n@author: 67135099\r\n\"\"\"\r\n##reads all the files in the current working directory, make sure only input files are in folde\r\nimport os\r\n#import numpy as np \r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/\") \r\nfiles = [ f for f in os.listdir( os.curdir ) if os.path.isfile(f) ]\r\nfiles\r\nfirstFile = files[0]\r\nfirstFile\r\n\r\n#Checks all the files structure that all of the lines in the file are of length 403\r\n##################################################################################################\r\ncount = 0\r\nmaxLength = 0\r\n\r\nminLength = 100000\r\nwith open(firstFile, \"r\") as theFirstFile:\r\n for line in theFirstFile:\r\n count +=1\r\n if len(line) < minLength:\r\n minLength = len(line)\r\n if len(line) > maxLength:\r\n maxLength = len(line)\r\n print(\"Count =\", count)\r\n print(\"Max = \", maxLength)\r\n print(\"Min = \", minLength)\r\n #Now going to extract some data from the file and see what is there. \r\n##################################################################################################### \r\n\r\n########################################################################################################\r\n#Now we can combine all of the data from all of the files into a single data frame\r\n\r\nimport pandas as pd\r\ngiantListOfDictionaries = []\r\nfor currentFile in files:\r\n with open(currentFile, \"r\") as theFirstFile:\r\n for line in theFirstFile:\r\n field1 = line[21:29]\r\n field2 = line[54:58]\r\n field3 = line[58:60]\r\n field4 = line[60:62]\r\n field5 = line[65:67]\r\n field6 = line[68:70]\r\n###needd to calculate decimals offline and input here\r\n field7 = \"NULL\"\r\n field8 = \"NULL\"\r\n field9 = line[48:51]\r\n field10 = line[122:125]\r\n field11 = line[126:127]\r\n field12 = \"NULL\"\r\n field13 = \"NULL\"\r\n field14 = \"NULL\"\r\n field15 = \"321\"\r\n field16 = \"NULL\"\r\n field17 = line[0:20]\r\n field18 = \"NULL\"\r\n field19 = \"NULL\"\r\n \r\n\r\n \r\n currentDictionary = {\"File_name\": currentFile,'Station_ID': field1, \r\n \"Year\": field2,\"Month\": field3,\"Day\": field4,\"Hour\": field5,\"Minute\":field6,\r\n \"Latitude\":field7,\"Longitude\":field8,\"Elevation\":field9,\"Observed_value\":field10,\r\n \"Source_QC_flag\":field11,\"Original_observed_value\":field12,\r\n \"Original_observed_value_units\":field13,\r\n \"Gravity_corrected_by_source\":field14,\r\n \"Source_ID\":field15,\r\n \"Report_type_code\":field16,\"Station_name\":field17,\r\n \"Alias_station_name\":field18,\"Homogenization_corrected_by_source\":field19}\r\n giantListOfDictionaries.append(currentDictionary)\r\n \r\n#length of file\r\nlen(giantListOfDictionaries)\r\n\r\n\r\n#create a dataframe from dictionary\r\ngiantDataFrame = pd.DataFrame(giantListOfDictionaries)\r\n\r\n##giantDataFrame\r\n#Delete the unwanted first column File name\r\n#giantDataFrame=giantDataFrame.drop(\"File_name\",axis=1)\r\n\r\n\r\n########################################################################################### \r\n#replace missing data values in the dataframe with NUlL\r\ngiantDataFrame['Observed_value'] = pd.to_numeric(giantDataFrame['Observed_value'], errors='coerce')\r\ngiantDataFrame = giantDataFrame.drop([giantDataFrame.index[0]])\r\n#giantDataFrame['Observed_value'] = giantDataFrame['Observed_value']/10\r\ngiantDataFrame[\"Timestamp2\"] = giantDataFrame[\"Year\"].map(str) + \"-\" + giantDataFrame[\"Month\"].map(str)+ \"-\" + giantDataFrame[\"Day\"].map(str) 
\r\ngiantDataFrame[\"Timestamp\"] = giantDataFrame[\"Timestamp2\"].map(str)+ \" \" + giantDataFrame[\"Hour\"].map(str)+\":\"+giantDataFrame[\"Minute\"].map(str) \r\n\r\n########################################################################################\r\n##add in lat/long from a list matchinh station ids\r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/output\")\r\nstn_list = pd.read_csv(\"station_list.csv\")\r\ngiantDataFrame=giantDataFrame.merge(stn_list, on='Station_ID', how='left')\r\ngiantDataFrame=giantDataFrame.drop(\"Latitude_x\",axis=1)\r\ngiantDataFrame=giantDataFrame.drop(\"Longitude_x\",axis=1)\r\ngiantDataFrame[\"Longitude\"] = giantDataFrame[\"Longitude_y\"]\r\ngiantDataFrame[\"Latitude\"] = giantDataFrame[\"Latitude_y\"]\r\n##################################################################################\r\n###set order of cloumns headers in dataframe\r\ngiantDataFrame=giantDataFrame[[\"Source_ID\",'Station_ID',\"Station_name\",\"Alias_station_name\", \r\n \"Year\",\"Month\",\"Day\",\"Hour\",\"Minute\",\r\n \"Latitude\",\"Longitude\",\"Elevation\",\"Observed_value\",\r\n \"Source_QC_flag\",\"Original_observed_value\",\r\n \"Original_observed_value_units\",\r\n \"Gravity_corrected_by_source\",\r\n \"Homogenization_corrected_by_source\",\r\n \"Report_type_code\",\"Timestamp\"]]\r\n#####################################################################\r\n##'''strip leading and trailing space in each column'''\r\ngiantDataFrame['Source_ID'] = giantDataFrame['Source_ID'].str.strip() \r\ngiantDataFrame['Station_ID'] = giantDataFrame['Station_ID'].str.strip() \r\ngiantDataFrame['Station_name'] = giantDataFrame['Station_name'].str.strip()\r\ngiantDataFrame['Alias_station_name'] = giantDataFrame['Alias_station_name'].str.strip()\r\ngiantDataFrame['Year'] = giantDataFrame['Year'].str.strip()\r\ngiantDataFrame['Month'] = giantDataFrame['Month'].str.strip()\r\ngiantDataFrame['Day'] = giantDataFrame['Day'].str.strip()\r\ngiantDataFrame['Hour'] = giantDataFrame['Hour'].str.strip()\r\ngiantDataFrame['Minute'] = giantDataFrame['Minute'].str.strip()\r\n#giantDataFrame['Latitude'] = giantDataFrame['Latitude'].str.strip()\r\n#giantDataFrame['Longitude'] = giantDataFrame['Longitude'].str.strip()\r\ngiantDataFrame['Elevation'] = giantDataFrame['Elevation'].str.strip()\r\n#giantDataFrame['Observed_value'] = giantDataFrame['Observed_value'].str.strip()\r\ngiantDataFrame['Source_QC_flag'] = giantDataFrame['Source_QC_flag'].str.strip()\r\n#giantDataFrame['Original_observed_value'] = giantDataFrame['Original_observed'].str.strip()\r\ngiantDataFrame['Original_observed_value_units'] = giantDataFrame['Original_observed_value_units'].str.strip()\r\ngiantDataFrame['Gravity_corrected_by_source'] = giantDataFrame['Gravity_corrected_by_source'].str.strip()\r\ngiantDataFrame['Homogenization_corrected_by_source'] = giantDataFrame['Homogenization_corrected_by_source'].str.strip()\r\ngiantDataFrame['Report_type_code'] = giantDataFrame['Report_type_code'].str.strip()\r\n\r\n########################################################################################################\r\n\r\n\r\n\r\n############################################################\r\n#write one large pipe delimited file with all stations combined if same station named by station_id+ variable name\r\n \r\n#stationsAsBigList = giantDataFrame[\"Station_ID\"].tolist()\r\n#.to_csv('CHN01000_station_level_pressure_321.psv',sep='|',index=False)\r\n\r\n####################to csv by unique staion 
id\r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/output/Wind_speed\")\r\ncats = sorted(giantDataFrame['Station_ID'].unique())\r\nfor cat in cats:\r\n outfilename = cat + \"_wind_speed_321.psv\"\r\n print(outfilename)\r\n giantDataFrame[giantDataFrame[\"Station_ID\"] == cat].to_csv(outfilename,sep='|',index=False)\r\n############################################################\r\n##write out separate pipe delimited files by station id\r\n#stationsAsBigList = giantDataFrame[\"Station_ID\"].tolist()\r\n#stationList= list(set(stationsAsBigList))\r\n#for station in stationList:\r\n # print(type(station))\r\n # currentDataFrame = giantDataFrame[giantDataFrame['Station_ID'] == station]\r\n # currentDataFrame.to_csv(station + \"_pressure.csv\",sep=\",\",index=False)\r\n\r\n\r\n" ]
[ [ "pandas.to_numeric", "pandas.DataFrame", "pandas.read_csv" ] ]
TragerJoswig-Jones/dvoc_model
[ "fb5d369096e436b2e4a518c4f16c3493e36aadfc" ]
[ "dvoc_model/droop.py" ]
[ "from math import pi, sin, cos\nimport numpy as np\n\nfrom dvoc_model.reference_frames import SinCos, Abc, Dq0, AlphaBeta\nfrom dvoc_model.constants import *\nfrom dvoc_model.simulate import simulate, shift_controller_angle_half\nfrom dvoc_model.elements import Node, RefFrames\nfrom dvoc_model.calculations import calculate_power\n\n\nclass Droop(Node):\n def __init__(self,\n p_ref: float,\n q_ref: float,\n m_p: float = 2.6e-3,\n m_q: float = 5.0e-3,\n v_nom: float = 120.,\n hz_nom: float = 60,\n varphi: float = pi / 2,\n omega_c: float = 2 * pi * 30,\n ref: RefFrames = RefFrames.POLAR,\n dt: float = 1.0 / 10e3,\n start_eq: bool = True,\n ):\n\n # set droop controller parameters\n self.v_nom = v_nom\n self.omega_nom = 2 * pi * hz_nom\n self.omega_c = omega_c\n self.m_p = m_p\n self.m_q = m_q\n self.sin_phi = sin(varphi)\n self.cos_phi = cos(varphi)\n\n self.p_ref = p_ref\n self.q_ref = q_ref\n self.dt = dt\n self.line = None\n\n # set low-pass filter initial values\n p_filt = 0\n q_filt = 0\n self.p = 0\n self.q = 0\n\n # initialize state variables\n if ref is RefFrames.POLAR:\n super().__init__((self.v_nom, 0, p_filt, q_filt), ref)\n else:\n v = AlphaBeta.from_polar(self.v_nom, 0)\n super().__init__((v.alpha, v.beta, p_filt, q_filt), ref)\n\n if start_eq:\n shift_controller_angle_half(self, self.ref, self.omega_nom, self.dt)\n\n if ref is RefFrames.POLAR:\n self.state_names = [\"v\", \"theta\", \"p,filt\", \"q,filt\"]\n else:\n self.state_names = [\"v,alpha\", \"v,beta\", \"p,filt\", \"q,filt\"]\n\n def low_pass_dynamics(self, x, y_filt): # TODO: Search how to derive discretization of low pass\n return self.omega_c * (x - y_filt)\n\n def update_states(self):\n self.v = self.states[0]\n self.theta = self.states[1]\n self.p_filt = self.states[2]\n self.q_filt = self.states[3]\n\n def alpha_beta_dynamics(self, x=None, t=None, u=None):\n # Power Calculation\n v = AlphaBeta.from_polar(self.v, self.theta)\n i = self.line.i\n p_calc = 1.5 * (v.alpha * i.alpha + v.beta * i.beta)\n q_calc = 1.5 * (v.beta * i.alpha - v.alpha * i.beta)\n\n self.p = p_calc\n self.q = q_calc\n\n # Low-Pass Filter\n p_filt_dt = self.low_pass_dynamics(p_calc, self.p_filt)\n q_filt_dt = self.low_pass_dynamics(q_calc, self.q_filt)\n\n p_err = self.p_filt - self.p_ref\n q_err = self.q_filt - self.q_ref\n\n # Droop Control\n dadt = None\n dbdt = None\n\n dvdt = dbdt # TODO: Implement this?\n omega = dadt # Todo: Implement this?\n return np.array([dvdt, omega, p_filt_dt, q_filt_dt])\n\n def polar_dynamics(self, x=None, t=None, u=None):\n # Power Calculation\n if x is None:\n x = self.states[:, 0]\n v_ab = AlphaBeta.from_polar(x[0], x[1])\n v = x[0]\n theta = x[1]\n p_filt = x[2]\n q_filt = x[3]\n\n i = self.line.i_alpha_beta()\n p, q = calculate_power(v_ab, i)\n\n # Low-Pass Filter\n p_filt_dt = self.low_pass_dynamics(p, p_filt)\n q_filt_dt = self.low_pass_dynamics(q, q_filt)\n\n p_err = p_filt - self.p_ref\n q_err = q_filt - self.q_ref\n\n # Droop Control\n dvdt = (self.v_nom - self.m_q * q) - v\n omega = self.omega_nom - self.m_p * p_err\n return np.array([dvdt, omega, p_filt_dt, q_filt_dt])\n\n\nif __name__ == \"__main__\":\n import numpy as np\n from matplotlib import pyplot as plt\n\n v_nom = 120\n omega_c = 2*pi*30 # Changing this value changes how quickly P & Q filt reach the average cycle values\n q_ = 0\n\n # grid parameters\n grid = Dq0(SQRT_2*v_nom, 0, 0)\n grid_omega = TWO_PI * 60\n\n # simulation time parameters\n dt = 1 / 10e3\n t = 1000e-3\n ts = np.arange(0, t, dt)\n steps = len(ts)\n\n # create a step 
function for dispatch (3A to 6A)\n q_ref = q_ * np.ones(steps)\n\n p_ref = 0 * np.ones(steps)\n\n p_ref[len(ts)//8:] = 250 # Add a step in the Active Power reference\n p_ref[len(ts)//4:] = 500 # Add a step in the Active Power reference\n p_ref[len(ts)//2:] = 750 # Add a step in the Active Power reference\n\n controller = Droop(0., 0.)\n data = simulate(controller, p_ref, q_ref, dt, t, Rf=0.8)#, id0=1.93, iq0=-1.23)\n\n plt.show()\n" ]
[ [ "numpy.arange", "numpy.ones", "matplotlib.pyplot.show", "numpy.array" ] ]
KerasKorea/YOLK_ObjectDetector
[ "78a260746c50508fcdf1d0c56a7d4f5373b1f5bf" ]
[ "keras_ssd/data_generator/object_detection_2d_data_generator.py" ]
[ "'''\nA data generator for 2D object detection.\n\nCopyright (C) 2018 Pierluigi Ferrari\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport inspect\nfrom collections import defaultdict\nimport warnings\nimport sklearn.utils\nfrom copy import deepcopy\nfrom PIL import Image\nimport cv2\nimport csv\nimport os\nimport sys\nfrom tqdm import tqdm, trange\ntry:\n import h5py\nexcept ImportError:\n warnings.warn(\"'h5py' module is missing. The fast HDF5 dataset option will be unavailable.\")\ntry:\n import json\nexcept ImportError:\n warnings.warn(\"'json' module is missing. The JSON-parser will be unavailable.\")\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n warnings.warn(\"'BeautifulSoup' module is missing. The XML-parser will be unavailable.\")\ntry:\n import pickle\nexcept ImportError:\n warnings.warn(\"'pickle' module is missing. You won't be able to save parsed file lists and annotations as pickled files.\")\n\nfrom keras_ssd.ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom keras_ssd.data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter\n\nclass DegenerateBatchError(Exception):\n '''\n An exception class to be raised if a generated batch ends up being degenerate,\n e.g. if a generated batch is empty.\n '''\n pass\n\nclass DatasetError(Exception):\n '''\n An exception class to be raised if a anything is wrong with the dataset,\n in particular if you try to generate batches when no dataset was loaded.\n '''\n pass\n\nclass DataGenerator:\n '''\n A generator to generate batches of samples and corresponding labels indefinitely.\n\n Can shuffle the dataset consistently after each complete pass.\n\n Currently provides three methods to parse annotation data: A general-purpose CSV parser,\n an XML parser for the Pascal VOC datasets, and a JSON parser for the MS COCO datasets.\n If the annotations of your dataset are in a format that is not supported by these parsers,\n you could just add another parser method and still use this generator.\n\n Can perform image transformations for data conversion and data augmentation,\n for details please refer to the documentation of the `generate()` method.\n '''\n\n def __init__(self,\n load_images_into_memory=False,\n hdf5_dataset_path=None,\n filenames=None,\n filenames_type='text',\n images_dir=None,\n labels=None,\n image_ids=None,\n eval_neutral=None,\n labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'),\n verbose=True):\n '''\n Initializes the data generator. You can either load a dataset directly here in the constructor,\n e.g. 
an HDF5 dataset, or you can use one of the parser methods to read in a dataset.\n\n Arguments:\n load_images_into_memory (bool, optional): If `True`, the entire dataset will be loaded into memory.\n This enables noticeably faster data generation than loading batches of images into memory ad hoc.\n Be sure that you have enough memory before you activate this option.\n hdf5_dataset_path (str, optional): The full file path of an HDF5 file that contains a dataset in the\n format that the `create_hdf5_dataset()` method produces. If you load such an HDF5 dataset, you\n don't need to use any of the parser methods anymore, the HDF5 dataset already contains all relevant\n data.\n filenames (string or list, optional): `None` or either a Python list/tuple or a string representing\n a filepath. If a list/tuple is passed, it must contain the file names (full paths) of the\n images to be used. Note that the list/tuple must contain the paths to the images,\n not the images themselves. If a filepath string is passed, it must point either to\n (1) a pickled file containing a list/tuple as described above. In this case the `filenames_type`\n argument must be set to `pickle`.\n Or\n (2) a text file. Each line of the text file contains the file name (basename of the file only,\n not the full directory path) to one image and nothing else. In this case the `filenames_type`\n argument must be set to `text` and you must pass the path to the directory that contains the\n images in `images_dir`.\n filenames_type (string, optional): In case a string is passed for `filenames`, this indicates what\n type of file `filenames` is. It can be either 'pickle' for a pickled file or 'text' for a\n plain text file.\n images_dir (string, optional): In case a text file is passed for `filenames`, the full paths to\n the images will be composed from `images_dir` and the names in the text file, i.e. this\n should be the directory that contains the images to which the text file refers.\n If `filenames_type` is not 'text', then this argument is irrelevant.\n labels (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain Numpy arrays\n that represent the labels of the dataset.\n image_ids (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain the image\n IDs of the images in the dataset.\n eval_neutral (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain for each image\n a list that indicates for each ground truth object in the image whether that object is supposed\n to be treated as neutral during an evaluation.\n labels_output_format (list, optional): A list of five strings representing the desired order of the five\n items class ID, xmin, ymin, xmax, ymax in the generated ground truth data (if any). 
The expected\n strings are 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'.\n verbose (bool, optional): If `True`, prints out the progress for some constructor operations that may\n take a bit longer.\n '''\n self.labels_output_format = labels_output_format\n self.labels_format={'class_id': labels_output_format.index('class_id'),\n 'xmin': labels_output_format.index('xmin'),\n 'ymin': labels_output_format.index('ymin'),\n 'xmax': labels_output_format.index('xmax'),\n 'ymax': labels_output_format.index('ymax')} # This dictionary is for internal use.\n\n self.dataset_size = 0 # As long as we haven't loaded anything yet, the dataset size is zero.\n self.load_images_into_memory = load_images_into_memory\n self.images = None # The only way that this list will not stay `None` is if `load_images_into_memory == True`.\n\n # `self.filenames` is a list containing all file names of the image samples (full paths).\n # Note that it does not contain the actual image files themselves. This list is one of the outputs of the parser methods.\n # In case you are loading an HDF5 dataset, this list will be `None`.\n if not filenames is None:\n if isinstance(filenames, (list, tuple)):\n self.filenames = filenames\n elif isinstance(filenames, str):\n with open(filenames, 'rb') as f:\n if filenames_type == 'pickle':\n self.filenames = pickle.load(f)\n elif filenames_type == 'text':\n self.filenames = [os.path.join(images_dir, line.strip()) for line in f]\n else:\n raise ValueError(\"`filenames_type` can be either 'text' or 'pickle'.\")\n else:\n raise ValueError(\"`filenames` must be either a Python list/tuple or a string representing a filepath (to a pickled or text file). The value you passed is neither of the two.\")\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n else:\n self.filenames = None\n\n # In case ground truth is available, `self.labels` is a list containing for each image a list (or NumPy array)\n # of ground truth bounding boxes for that image.\n if not labels is None:\n if isinstance(labels, str):\n with open(labels, 'rb') as f:\n self.labels = pickle.load(f)\n elif isinstance(labels, (list, tuple)):\n self.labels = labels\n else:\n raise ValueError(\"`labels` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.labels = None\n\n if not image_ids is None:\n if isinstance(image_ids, str):\n with open(image_ids, 'rb') as f:\n self.image_ids = pickle.load(f)\n elif isinstance(image_ids, (list, tuple)):\n self.image_ids = image_ids\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.image_ids = None\n\n if not eval_neutral is None:\n if isinstance(eval_neutral, str):\n with open(eval_neutral, 'rb') as f:\n self.eval_neutral = pickle.load(f)\n elif isinstance(eval_neutral, (list, tuple)):\n self.eval_neutral = eval_neutral\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. 
The value you passed is neither of the two.\")\n else:\n self.eval_neutral = None\n\n if not hdf5_dataset_path is None:\n self.hdf5_dataset_path = hdf5_dataset_path\n self.load_hdf5_dataset(verbose=verbose)\n else:\n self.hdf5_dataset = None\n\n def load_hdf5_dataset(self, verbose=True):\n '''\n Loads an HDF5 dataset that is in the format that the `create_hdf5_dataset()` method\n produces.\n\n Arguments:\n verbose (bool, optional): If `True`, prints out the progress while loading\n the dataset.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset = h5py.File(self.hdf5_dataset_path, 'r')\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset or images in memory, we will shuffle this index list.\n\n if self.load_images_into_memory:\n self.images = []\n if verbose: tr = trange(self.dataset_size, desc='Loading images into memory', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.images.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n\n if self.hdf5_dataset.attrs['has_labels']:\n self.labels = []\n labels = self.hdf5_dataset['labels']\n label_shapes = self.hdf5_dataset['label_shapes']\n if verbose: tr = trange(self.dataset_size, desc='Loading labels', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.labels.append(labels[i].reshape(label_shapes[i]))\n\n if self.hdf5_dataset.attrs['has_image_ids']:\n self.image_ids = []\n image_ids = self.hdf5_dataset['image_ids']\n if verbose: tr = trange(self.dataset_size, desc='Loading image IDs', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.image_ids.append(image_ids[i])\n\n if self.hdf5_dataset.attrs['has_eval_neutral']:\n self.eval_neutral = []\n eval_neutral = self.hdf5_dataset['eval_neutral']\n if verbose: tr = trange(self.dataset_size, desc='Loading evaluation-neutrality annotations', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.eval_neutral.append(eval_neutral[i])\n\n def parse_csv(self,\n images_dir,\n labels_filename,\n input_format,\n include_classes='all',\n random_sample=False,\n ret=False,\n verbose=True):\n '''\n Arguments:\n images_dir (str): The path to the directory that contains the images.\n labels_filename (str): The filepath to a CSV file that contains one ground truth bounding box per line\n and each line contains the following six items: image file name, class ID, xmin, xmax, ymin, ymax.\n The six items do not have to be in a specific order, but they must be the first six columns of\n each line. The order of these items in the CSV file must be specified in `input_format`.\n The class ID is an integer greater than zero. Class ID 0 is reserved for the background class.\n `xmin` and `xmax` are the left-most and right-most absolute horizontal coordinates of the box,\n `ymin` and `ymax` are the top-most and bottom-most absolute vertical coordinates of the box.\n The image name is expected to be just the name of the image file without the directory path\n at which the image is located.\n input_format (list): A list of six strings representing the order of the six items\n image file name, class ID, xmin, xmax, ymin, ymax in the input CSV file. The expected strings\n are 'image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. 
If 'all', all ground truth boxes will be included in the dataset.\n random_sample (float, optional): Either `False` or a float in `[0,1]`. If this is `False`, the\n full dataset will be used by the generator. If this is a float in `[0,1]`, a randomly sampled\n fraction of the dataset will be used, where `random_sample` is the fraction of the dataset\n to be used. For example, if `random_sample = 0.2`, 20 percent of the dataset will be randomly selected,\n the rest will be omitted. The fraction refers to the number of images, not to the number\n of boxes, i.e. each image that will be added to the dataset will always be added with all\n of its boxes.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, and image IDs.\n '''\n\n # Set class members.\n self.images_dir = images_dir\n self.labels_filename = labels_filename\n self.input_format = input_format\n self.include_classes = include_classes\n\n # Before we begin, make sure that we have a labels_filename and an input_format\n if self.labels_filename is None or self.input_format is None:\n raise ValueError(\"`labels_filename` and/or `input_format` have not been set yet. You need to pass them as arguments.\")\n\n # Erase data that might have been parsed before\n self.filenames = []\n self.image_ids = []\n self.labels = []\n\n # First, just read in the CSV file lines and sort them.\n\n data = []\n\n with open(self.labels_filename, newline='') as csvfile:\n csvread = csv.reader(csvfile, delimiter=',')\n next(csvread) # Skip the header row.\n for row in csvread: # For every line (i.e. for every bounding box) in the CSV file...\n if self.include_classes == 'all' or int(row[self.input_format.index('class_id')].strip()) in self.include_classes: # If the class_id is among the classes that are to be included in the dataset...\n box = [] # Store the box class and coordinates here\n box.append(row[self.input_format.index('image_name')].strip()) # Select the image name column in the input format and append its content to `box`\n for element in self.labels_output_format: # For each element in the output format (where the elements are the class ID and the four box coordinates)...\n box.append(int(row[self.input_format.index(element)].strip())) # ...select the respective column in the input format and append it to `box`.\n data.append(box)\n\n data = sorted(data) # The data needs to be sorted, otherwise the next step won't give the correct result\n\n # Now that we've made sure that the data is sorted by file names,\n # we can compile the actual samples and labels lists\n\n current_file = data[0][0] # The current image for which we're collecting the ground truth boxes\n current_image_id = data[0][0].split('.')[0] # The image ID will be the portion of the image name before the first dot.\n current_labels = [] # The list where we collect all ground truth boxes for a given image\n add_to_dataset = False\n for i, box in enumerate(data):\n\n if box[0] == current_file: # If this box (i.e.
this line of the CSV file) belongs to the current image file\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else: # If this box belongs to a new image file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n current_labels = [] # Reset the labels list because this is a new file.\n current_file = box[0]\n current_image_id = box[0].split('.')[0]\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret: # In case we want to return these\n return self.images, self.filenames, self.labels, self.image_ids\n\n def parse_xml(self,\n images_dirs,\n image_set_filenames,\n annotations_dirs=[],\n classes=['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog',\n 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'],\n include_classes = 'all',\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False,\n verbose=True):\n '''\n This is an XML parser for the Pascal VOC datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the data format and XML tags of the Pascal VOC datasets.\n\n Arguments:\n images_dirs (list): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. 
one directory that contains the images for Pascal VOC 2007, another that contains\n the images for Pascal VOC 2012, etc.).\n image_set_filenames (list): A list of strings, where each string is the path of the text file with the image\n set to be loaded. Must be one file per image directory given. These text files define what images in the\n respective image directories are to be part of the dataset and simply contains one image ID per line\n and nothing else.\n annotations_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains the annotations (XML files) that belong to the images in the respective image directories given.\n The directories must contain one XML file per image and the name of an XML file must be the image ID\n of the image it belongs to. The content of the XML files must be in the Pascal VOC format.\n classes (list, optional): A list containing the names of the object classes as found in the\n `name` XML tags. Must include the class `background` as the first list item. The order of this list\n defines the class IDs.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n exclude_truncated (bool, optional): If `True`, excludes boxes that are labeled as 'truncated'.\n exclude_difficult (bool, optional): If `True`, excludes boxes that are labeled as 'difficult'.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, image IDs,\n and a list indicating which boxes are annotated with the label \"difficult\".\n '''\n # Set class members.\n self.images_dirs = images_dirs\n self.annotations_dirs = annotations_dirs\n self.image_set_filenames = image_set_filenames\n self.classes = classes\n self.include_classes = include_classes\n\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n self.eval_neutral = []\n if not annotations_dirs:\n self.labels = None\n self.eval_neutral = None\n annotations_dirs = [None] * len(images_dirs)\n\n for images_dir, image_set_filename, annotations_dir in zip(images_dirs, image_set_filenames, annotations_dirs):\n # Read the image set file that so that we know all the IDs of all the images to be included in the dataset.\n with open(image_set_filename) as f:\n image_ids = [line.strip() for line in f] # Note: These are strings, not integers.\n self.image_ids += image_ids\n\n if verbose: it = tqdm(image_ids, desc=\"Processing image set '{}'\".format(os.path.basename(image_set_filename)), file=sys.stdout)\n else: it = image_ids\n\n # Loop over all images in this dataset.\n for image_id in it:\n\n filename = '{}'.format(image_id) + '.jpg'\n self.filenames.append(os.path.join(images_dir, filename))\n\n if not annotations_dir is None:\n # Parse the XML file for this image.\n with open(os.path.join(annotations_dir, image_id + '.xml')) as f:\n soup = BeautifulSoup(f, 'xml')\n\n folder = soup.folder.text # In case we want to return the folder in addition to the image file name. 
Relevant for determining which dataset an image belongs to.\n #filename = soup.filename.text\n\n boxes = [] # We'll store all boxes for this image here.\n eval_neutr = [] # We'll store whether a box is annotated as \"difficult\" here.\n objects = soup.find_all('object') # Get a list of all objects in this image.\n\n # Parse the data for each object.\n for obj in objects:\n class_name = obj.find('name', recursive=False).text\n class_id = self.classes.index(class_name)\n # Check whether this class is supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not class_id in self.include_classes): continue\n pose = obj.find('pose', recursive=False).text\n truncated = int(obj.find('truncated', recursive=False).text)\n if exclude_truncated and (truncated == 1): continue\n difficult = int(obj.find('difficult', recursive=False).text)\n if exclude_difficult and (difficult == 1): continue\n # Get the bounding box coordinates.\n bndbox = obj.find('bndbox', recursive=False)\n xmin = int(bndbox.xmin.text)\n ymin = int(bndbox.ymin.text)\n xmax = int(bndbox.xmax.text)\n ymax = int(bndbox.ymax.text)\n item_dict = {'folder': folder,\n 'image_name': filename,\n 'image_id': image_id,\n 'class_name': class_name,\n 'class_id': class_id,\n 'pose': pose,\n 'truncated': truncated,\n 'difficult': difficult,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n if difficult: eval_neutr.append(True)\n else: eval_neutr.append(False)\n\n self.labels.append(boxes)\n self.eval_neutral.append(eval_neutr)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def parse_json(self,\n images_dirs,\n annotations_filenames,\n ground_truth_available=False,\n include_classes='all',\n ret=False,\n verbose=True):\n '''\n This is an JSON parser for the MS COCO datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the JSON format of the MS COCO datasets.\n\n Arguments:\n images_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. one directory that contains the images for MS COCO Train 2014, another one for MS COCO\n Val 2014, another one for MS COCO Train 2017 etc.).\n annotations_filenames (list): A list of strings, where each string is the path of the JSON file\n that contains the annotations for the images in the respective image directories given, i.e. one\n JSON file per image directory that contains the annotations for all images in that directory.\n The content of the JSON files must be in MS COCO object detection format. Note that these annotations\n files do not necessarily need to contain ground truth information. 
MS COCO also provides annotations\n files without ground truth information for the test datasets, called `image_info_[...].json`.\n ground_truth_available (bool, optional): Set `True` if the annotations files contain ground truth information.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels and image IDs.\n '''\n self.images_dirs = images_dirs\n self.annotations_filenames = annotations_filenames\n self.include_classes = include_classes\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n if not ground_truth_available:\n self.labels = None\n\n # Build the dictionaries that map between class names and class IDs.\n with open(annotations_filenames[0], 'r') as f:\n annotations = json.load(f)\n # Unfortunately the 80 MS COCO class IDs are not all consecutive. They go\n # from 1 to 90 and some numbers are skipped. Since the IDs that we feed\n # into a neural network must be consecutive, we'll save both the original\n # (non-consecutive) IDs as well as transformed maps.\n # We'll save both the map between the original\n self.cats_to_names = {} # The map between class names (values) and their original IDs (keys)\n self.classes_to_names = [] # A list of the class names with their indices representing the transformed IDs\n self.classes_to_names.append('background') # Need to add the background class first so that the indexing is right.\n self.cats_to_classes = {} # A dictionary that maps between the original (keys) and the transformed IDs (values)\n self.classes_to_cats = {} # A dictionary that maps between the transformed (keys) and the original IDs (values)\n for i, cat in enumerate(annotations['categories']):\n self.cats_to_names[cat['id']] = cat['name']\n self.classes_to_names.append(cat['name'])\n self.cats_to_classes[cat['id']] = i + 1\n self.classes_to_cats[i + 1] = cat['id']\n\n # Iterate over all datasets.\n for images_dir, annotations_filename in zip(self.images_dirs, self.annotations_filenames):\n # Load the JSON file.\n with open(annotations_filename, 'r') as f:\n annotations = json.load(f)\n\n if ground_truth_available:\n # Create the annotations map, a dictionary whose keys are the image IDs\n # and whose values are the annotations for the respective image ID.\n image_ids_to_annotations = defaultdict(list)\n for annotation in annotations['annotations']:\n image_ids_to_annotations[annotation['image_id']].append(annotation)\n\n if verbose: it = tqdm(annotations['images'], desc=\"Processing '{}'\".format(os.path.basename(annotations_filename)), file=sys.stdout)\n else: it = annotations['images']\n\n # Loop over all images in this dataset.\n for img in it:\n\n self.filenames.append(os.path.join(images_dir, img['file_name']))\n self.image_ids.append(img['id'])\n\n if ground_truth_available:\n # Get all annotations for this image.\n annotations = image_ids_to_annotations[img['id']]\n boxes = []\n for annotation in annotations:\n cat_id = annotation['category_id']\n # Check if this class is supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not cat_id in 
self.include_classes): continue\n # Transform the original class ID to fit in the sequence of consecutive IDs.\n class_id = self.cats_to_classes[cat_id]\n xmin = annotation['bbox'][0]\n ymin = annotation['bbox'][1]\n width = annotation['bbox'][2]\n height = annotation['bbox'][3]\n # Compute `xmax` and `ymax`.\n xmax = xmin + width\n ymax = ymin + height\n item_dict = {'image_name': img['file_name'],\n 'image_id': img['id'],\n 'class_id': class_id,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n self.labels.append(boxes)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids\n\n def create_hdf5_dataset(self,\n file_path='dataset.h5',\n resize=False,\n variable_image_size=True,\n verbose=True):\n '''\n Converts the currently loaded dataset into a HDF5 file. This HDF5 file contains all\n images as uncompressed arrays in a contiguous block of memory, which allows for them\n to be loaded faster. Such an uncompressed dataset, however, may take up considerably\n more space on your hard drive than the sum of the source images in a compressed format\n such as JPG or PNG.\n\n It is recommended that you always convert the dataset into an HDF5 dataset if you\n have enugh hard drive space since loading from an HDF5 dataset accelerates the data\n generation noticeably.\n\n Note that you must load a dataset (e.g. via one of the parser methods) before creating\n an HDF5 dataset from it.\n\n The created HDF5 dataset will remain open upon its creation so that it can be used right\n away.\n\n Arguments:\n file_path (str, optional): The full file path under which to store the HDF5 dataset.\n You can load this output file via the `DataGenerator` constructor in the future.\n resize (tuple, optional): `False` or a 2-tuple `(height, width)` that represents the\n target size for the images. All images in the dataset will be resized to this\n target size before they will be written to the HDF5 file. 
If `False`, no resizing\n will be performed.\n variable_image_size (bool, optional): The only purpose of this argument is that its\n value will be stored in the HDF5 dataset in order to be able to quickly find out\n whether the images in the dataset all have the same size or not.\n verbose (bool, optional): Whether or not prit out the progress of the dataset creation.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset_path = file_path\n\n dataset_size = len(self.filenames)\n\n # Create the HDF5 file.\n hdf5_dataset = h5py.File(file_path, 'w')\n\n # Create a few attributes that tell us what this dataset contains.\n # The dataset will obviously always contain images, but maybe it will\n # also contain labels, image IDs, etc.\n hdf5_dataset.attrs.create(name='has_labels', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_image_ids', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_eval_neutral', data=False, shape=None, dtype=np.bool_)\n # It's useful to be able to quickly check whether the images in a dataset all\n # have the same size or not, so add a boolean attribute for that.\n if variable_image_size and not resize:\n hdf5_dataset.attrs.create(name='variable_image_size', data=True, shape=None, dtype=np.bool_)\n else:\n hdf5_dataset.attrs.create(name='variable_image_size', data=False, shape=None, dtype=np.bool_)\n\n # Create the dataset in which the images will be stored as flattened arrays.\n # This allows us, among other things, to store images of variable size.\n hdf5_images = hdf5_dataset.create_dataset(name='images',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.uint8))\n\n # Create the dataset that will hold the image heights, widths and channels that\n # we need in order to reconstruct the images from the flattened arrays later.\n hdf5_image_shapes = hdf5_dataset.create_dataset(name='image_shapes',\n shape=(dataset_size, 3),\n maxshape=(None, 3),\n dtype=np.int32)\n\n if not (self.labels is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_labels = hdf5_dataset.create_dataset(name='labels',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.int32))\n\n # Create the dataset that will hold the dimensions of the labels arrays for\n # each image so that we can restore the labels from the flattened arrays later.\n hdf5_label_shapes = hdf5_dataset.create_dataset(name='label_shapes',\n shape=(dataset_size, 2),\n maxshape=(None, 2),\n dtype=np.int32)\n\n hdf5_dataset.attrs.modify(name='has_labels', value=True)\n\n if not (self.image_ids is None):\n\n hdf5_image_ids = hdf5_dataset.create_dataset(name='image_ids',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=str))\n\n hdf5_dataset.attrs.modify(name='has_image_ids', value=True)\n\n if not (self.eval_neutral is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_eval_neutral = hdf5_dataset.create_dataset(name='eval_neutral',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.bool_))\n\n hdf5_dataset.attrs.modify(name='has_eval_neutral', value=True)\n\n if verbose:\n tr = trange(dataset_size, desc='Creating HDF5 dataset', file=sys.stdout)\n else:\n tr = range(dataset_size)\n\n # Iterate over all images in the dataset.\n for i in tr:\n\n # Store the image.\n with Image.open(self.filenames[i]) as image:\n\n image = np.asarray(image, dtype=np.uint8)\n\n # Make sure all images end up having 
three channels.\n if image.ndim == 2:\n image = np.stack([image] * 3, axis=-1)\n elif image.ndim == 3:\n if image.shape[2] == 1:\n image = np.concatenate([image] * 3, axis=-1)\n elif image.shape[2] == 4:\n image = image[:,:,:3]\n\n if resize:\n image = cv2.resize(image, dsize=(resize[1], resize[0]))\n\n # Flatten the image array and write it to the images dataset.\n hdf5_images[i] = image.reshape(-1)\n # Write the image's shape to the image shapes dataset.\n hdf5_image_shapes[i] = image.shape\n\n # Store the ground truth if we have any.\n if not (self.labels is None):\n\n labels = np.asarray(self.labels[i])\n # Flatten the labels array and write it to the labels dataset.\n hdf5_labels[i] = labels.reshape(-1)\n # Write the labels' shape to the label shapes dataset.\n hdf5_label_shapes[i] = labels.shape\n\n # Store the image ID if we have one.\n if not (self.image_ids is None):\n\n hdf5_image_ids[i] = self.image_ids[i]\n\n # Store the evaluation-neutrality annotations if we have any.\n if not (self.eval_neutral is None):\n\n hdf5_eval_neutral[i] = self.eval_neutral[i]\n\n hdf5_dataset.close()\n self.hdf5_dataset = h5py.File(file_path, 'r')\n self.hdf5_dataset_path = file_path\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset, we will shuffle this index list.\n\n def generate(self,\n batch_size=32,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images', 'encoded_labels'},\n keep_images_without_gt=False,\n degenerate_box_handling='remove'):\n '''\n Generates batches of samples and (optionally) corresponding labels indefinitely.\n\n Can shuffle the samples consistently after each complete pass.\n\n Optionally takes a list of arbitrary image transformations to apply to the\n samples ad hoc.\n\n Arguments:\n batch_size (int, optional): The size of the batches to be generated.\n shuffle (bool, optional): Whether or not to shuffle the dataset before each pass.\n This option should always be `True` during training, but it can be useful to turn shuffling off\n for debugging or if you're using the generator for prediction.\n transformations (list, optional): A list of transformations that will be applied to the images and labels\n in the given order. Each transformation is a callable that takes as input an image (as a Numpy array)\n and optionally labels (also as a Numpy array) and returns an image and optionally labels in the same\n format.\n label_encoder (callable, optional): Only relevant if labels are given. A callable that takes as input the\n labels of a batch (as a list of Numpy arrays) and returns some structure that represents those labels.\n The general use case for this is to convert labels from their input format to a format that a given object\n detection model needs as its training targets.\n returns (set, optional): A set of strings that determines what outputs the generator yields. The generator's output\n is always a tuple that contains the outputs specified in this set and only those. If an output is not available,\n it will be `None`. The output tuple can contain the following outputs according to the specified keyword strings:\n * 'processed_images': An array containing the processed images. Will always be in the outputs, so it doesn't\n matter whether or not you include this keyword in the set.\n * 'encoded_labels': The encoded labels tensor. 
Will always be in the outputs if a label encoder is given,\n so it doesn't matter whether or not you include this keyword in the set if you pass a label encoder.\n * 'matched_anchors': Only available if `labels_encoder` is an `SSDInputEncoder` object. The same as 'encoded_labels',\n but containing anchor box coordinates for all matched anchor boxes instead of ground truth coordinates.\n This can be useful to visualize what anchor boxes are being matched to each ground truth box. Only available\n in training mode.\n * 'processed_labels': The processed, but not yet encoded labels. This is a list that contains for each\n batch image a Numpy array with all ground truth boxes for that image. Only available if ground truth is available.\n * 'filenames': A list containing the file names (full paths) of the images in the batch.\n * 'image_ids': A list containing the integer IDs of the images in the batch. Only available if there\n are image IDs available.\n * 'evaluation-neutral': A nested list of lists of booleans. Each list contains `True` or `False` for every ground truth\n bounding box of the respective image depending on whether that bounding box is supposed to be evaluation-neutral (`True`)\n or not (`False`). May return `None` if there exists no such concept for a given dataset. An example for\n evaluation-neutrality are the ground truth boxes annotated as \"difficult\" in the Pascal VOC datasets, which are\n usually treated to be neutral in a model evaluation.\n * 'inverse_transform': A nested list that contains a list of \"inverter\" functions for each item in the batch.\n These inverter functions take (predicted) labels for an image as input and apply the inverse of the transformations\n that were applied to the original image to them. This makes it possible to let the model make predictions on a\n transformed image and then convert these predictions back to the original image. This is mostly relevant for\n evaluation: If you want to evaluate your model on a dataset with varying image sizes, then you are forced to\n transform the images somehow (e.g. by resizing or cropping) to make them all the same size. Your model will then\n predict boxes for those transformed images, but for the evaluation you will need predictions with respect to the\n original images, not with respect to the transformed images. This means you will have to transform the predicted\n box coordinates back to the original image sizes. Note that for each image, the inverter functions for that\n image need to be applied in the order in which they are given in the respective list for that image.\n * 'original_images': A list containing the original images in the batch before any processing.\n * 'original_labels': A list containing the original ground truth boxes for the images in this batch before any\n processing. Only available if ground truth is available.\n The order of the outputs in the tuple is the order of the list above. If `returns` contains a keyword for an\n output that is unavailable, that output omitted in the yielded tuples and a warning will be raised.\n keep_images_without_gt (bool, optional): If `False`, images for which there aren't any ground truth boxes before\n any transformations have been applied will be removed from the batch. If `True`, such images will be kept\n in the batch.\n degenerate_box_handling (str, optional): How to handle degenerate boxes, which are boxes that have `xmax <= xmin` and/or\n `ymax <= ymin`. 
Degenerate boxes can sometimes be in the dataset, or non-degenerate boxes can become degenerate\n after they were processed by transformations. Note that the generator checks for degenerate boxes after all\n transformations have been applied (if any), but before the labels were passed to the `label_encoder` (if one was given).\n Can be one of 'warn' or 'remove'. If 'warn', the generator will merely print a warning to let you know that there\n are degenerate boxes in a batch. If 'remove', the generator will remove degenerate boxes from the batch silently.\n\n Yields:\n The next batch as a tuple of items as defined by the `returns` argument.\n '''\n\n if self.dataset_size == 0:\n raise DatasetError(\"Cannot generate batches because you did not load a dataset.\")\n\n #############################################################################################\n # Warn if any of the set returns aren't possible.\n #############################################################################################\n\n if self.labels is None:\n if any([ret in returns for ret in ['original_labels', 'processed_labels', 'encoded_labels', 'matched_anchors', 'evaluation-neutral']]):\n warnings.warn(\"Since no labels were given, none of 'original_labels', 'processed_labels', 'evaluation-neutral', 'encoded_labels', and 'matched_anchors' \" +\n \"are possible returns, but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif label_encoder is None:\n if any([ret in returns for ret in ['encoded_labels', 'matched_anchors']]):\n warnings.warn(\"Since no label encoder was given, 'encoded_labels' and 'matched_anchors' aren't possible returns, \" +\n \"but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif not isinstance(label_encoder, SSDInputEncoder):\n if 'matched_anchors' in returns:\n warnings.warn(\"`label_encoder` is not an `SSDInputEncoder` object, therefore 'matched_anchors' is not a possible return, \" +\n \"but you set `returns = {}`. 
The impossible returns will be `None`.\".format(returns))\n\n #############################################################################################\n # Do a few preparatory things like maybe shuffling the dataset initially.\n #############################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n if degenerate_box_handling == 'remove':\n box_filter = BoxFilter(check_overlap=False,\n check_min_area=False,\n check_degenerate=True,\n labels_format=self.labels_format)\n\n # Override the labels formats of all the transformations to make sure they are set correctly.\n if not (self.labels is None):\n for transform in transformations:\n transform.labels_format = self.labels_format\n\n #############################################################################################\n # Generate mini batches.\n #############################################################################################\n\n current = 0\n\n while True:\n\n batch_X, batch_y = [], []\n\n if current >= self.dataset_size:\n current = 0\n\n #########################################################################################\n # Maybe shuffle the dataset if a full pass over the dataset has finished.\n #########################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n #########################################################################################\n # Get the images, (maybe) image IDs, (maybe) labels, etc. 
for this batch.\n #########################################################################################\n\n # We prioritize our options in the following order:\n # 1) If we have the images already loaded in memory, get them from there.\n # 2) Else, if we have an HDF5 dataset, get the images from there.\n # 3) Else, if we have neither of the above, we'll have to load the individual image\n # files from disk.\n batch_indices = self.dataset_indices[current:current+batch_size]\n if not (self.images is None):\n for i in batch_indices:\n batch_X.append(self.images[i])\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n elif not (self.hdf5_dataset is None):\n for i in batch_indices:\n batch_X.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n else:\n batch_filenames = self.filenames[current:current+batch_size]\n for filename in batch_filenames:\n with Image.open(filename) as image:\n batch_X.append(np.array(image, dtype=np.uint8))\n\n # Get the labels for this batch (if there are any).\n if not (self.labels is None):\n batch_y = deepcopy(self.labels[current:current+batch_size])\n else:\n batch_y = None\n\n if not (self.eval_neutral is None):\n batch_eval_neutral = self.eval_neutral[current:current+batch_size]\n else:\n batch_eval_neutral = None\n\n # Get the image IDs for this batch (if there are any).\n if not (self.image_ids is None):\n batch_image_ids = self.image_ids[current:current+batch_size]\n else:\n batch_image_ids = None\n\n if 'original_images' in returns:\n batch_original_images = deepcopy(batch_X) # The original, unaltered images\n if 'original_labels' in returns:\n batch_original_labels = deepcopy(batch_y) # The original, unaltered labels\n\n current += batch_size\n\n #########################################################################################\n # Maybe perform image transformations.\n #########################################################################################\n\n batch_items_to_remove = [] # In case we need to remove any images from the batch, store their indices in this list.\n batch_inverse_transforms = []\n\n for i in range(len(batch_X)):\n\n if not (self.labels is None):\n # Convert the labels for this image to an array (in case they aren't already).\n batch_y[i] = np.array(batch_y[i])\n # If this image has no ground truth boxes, maybe we don't want to keep it in the batch.\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n # Apply any image transformations we may have received.\n if transformations:\n\n inverse_transforms = []\n\n for transform in transformations:\n\n if not (self.labels is None):\n\n if ('inverse_transform' in returns) and ('return_inverter' in inspect.signature(transform).parameters):\n batch_X[i], batch_y[i], inverse_transform = transform(batch_X[i], batch_y[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i], batch_y[i] = transform(batch_X[i], batch_y[i])\n\n if batch_X[i] is None: # In case the transform failed to produce an output image, which is possible for some random transforms.\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n else:\n\n if ('inverse_transform' in returns) and ('return_inverter' in 
inspect.signature(transform).parameters):\n batch_X[i], inverse_transform = transform(batch_X[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i] = transform(batch_X[i])\n\n batch_inverse_transforms.append(inverse_transforms[::-1])\n\n #########################################################################################\n # Check for degenerate boxes in this batch item.\n #########################################################################################\n\n if not (self.labels is None):\n\n xmin = self.labels_format['xmin']\n ymin = self.labels_format['ymin']\n xmax = self.labels_format['xmax']\n ymax = self.labels_format['ymax']\n\n if np.any(batch_y[i][:,xmax] - batch_y[i][:,xmin] <= 0) or np.any(batch_y[i][:,ymax] - batch_y[i][:,ymin] <= 0):\n if degenerate_box_handling == 'warn':\n warnings.warn(\"Detected degenerate ground truth bounding boxes for batch item {} with bounding boxes {}, \".format(i, batch_y[i]) +\n \"i.e. bounding boxes where xmax <= xmin and/or ymax <= ymin. \" +\n \"This could mean that your dataset contains degenerate ground truth boxes, or that any image transformations you may apply might \" +\n \"result in degenerate ground truth boxes, or that you are parsing the ground truth in the wrong coordinate format.\" +\n \"Degenerate ground truth bounding boxes may lead to NaN errors during the training.\")\n elif degenerate_box_handling == 'remove':\n batch_y[i] = box_filter(batch_y[i])\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n\n #########################################################################################\n # Remove any items we might not want to keep from the batch.\n #########################################################################################\n\n if batch_items_to_remove:\n for j in sorted(batch_items_to_remove, reverse=True):\n # This isn't efficient, but it hopefully shouldn't need to be done often anyway.\n batch_X.pop(j)\n batch_filenames.pop(j)\n if batch_inverse_transforms: batch_inverse_transforms.pop(j)\n if not (self.labels is None): batch_y.pop(j)\n if not (self.image_ids is None): batch_image_ids.pop(j)\n if not (self.eval_neutral is None): batch_eval_neutral.pop(j)\n if 'original_images' in returns: batch_original_images.pop(j)\n if 'original_labels' in returns and not (self.labels is None): batch_original_labels.pop(j)\n\n #########################################################################################\n\n # CAUTION: Converting `batch_X` into an array will result in an empty batch if the images have varying sizes\n # or varying numbers of channels. At this point, all images must have the same size and the same\n # number of channels.\n batch_X = np.array(batch_X)\n if (batch_X.size == 0):\n raise DegenerateBatchError(\"You produced an empty batch. This might be because the images in the batch vary \" +\n \"in their size and/or number of channels. 
Note that after all transformations \" +\n \"(if any were given) have been applied to all images in the batch, all images \" +\n \"must be homogenous in size along all axes.\")\n\n #########################################################################################\n # If we have a label encoder, encode our labels.\n #########################################################################################\n\n if not (label_encoder is None or self.labels is None):\n\n if ('matched_anchors' in returns) and isinstance(label_encoder, SSDInputEncoder):\n batch_y_encoded, batch_matched_anchors = label_encoder(batch_y, diagnostics=True)\n else:\n batch_y_encoded = label_encoder(batch_y, diagnostics=False)\n batch_matched_anchors = None\n\n else:\n batch_y_encoded = None\n batch_matched_anchors = None\n\n #########################################################################################\n # Compose the output.\n #########################################################################################\n\n ret = []\n if 'processed_images' in returns: ret.append(batch_X)\n if 'encoded_labels' in returns: ret.append(batch_y_encoded)\n if 'matched_anchors' in returns: ret.append(batch_matched_anchors)\n if 'processed_labels' in returns: ret.append(batch_y)\n if 'filenames' in returns: ret.append(batch_filenames)\n if 'image_ids' in returns: ret.append(batch_image_ids)\n if 'evaluation-neutral' in returns: ret.append(batch_eval_neutral)\n if 'inverse_transform' in returns: ret.append(batch_inverse_transforms)\n if 'original_images' in returns: ret.append(batch_original_images)\n if 'original_labels' in returns: ret.append(batch_original_labels)\n\n yield ret\n\n def save_dataset(self,\n filenames_path='filenames.pkl',\n labels_path=None,\n image_ids_path=None,\n eval_neutral_path=None):\n '''\n Writes the current `filenames`, `labels`, and `image_ids` lists to the specified files.\n This is particularly useful for large datasets with annotations that are\n parsed from XML files, which can take quite long. If you'll be using the\n same dataset repeatedly, you don't want to have to parse the XML label\n files every time.\n\n Arguments:\n filenames_path (str): The path under which to save the filenames pickle.\n labels_path (str): The path under which to save the labels pickle.\n image_ids_path (str, optional): The path under which to save the image IDs pickle.\n eval_neutral_path (str, optional): The path under which to save the pickle for\n the evaluation-neutrality annotations.\n '''\n with open(filenames_path, 'wb') as f:\n pickle.dump(self.filenames, f)\n if not labels_path is None:\n with open(labels_path, 'wb') as f:\n pickle.dump(self.labels, f)\n if not image_ids_path is None:\n with open(image_ids_path, 'wb') as f:\n pickle.dump(self.image_ids, f)\n if not eval_neutral_path is None:\n with open(eval_neutral_path, 'wb') as f:\n pickle.dump(self.eval_neutral, f)\n\n def get_dataset(self):\n '''\n Returns:\n 4-tuple containing lists and/or `None` for the filenames, labels, image IDs,\n and evaluation-neutrality annotations.\n '''\n return self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def get_dataset_size(self):\n '''\n Returns:\n The number of images in the dataset.\n '''\n return self.dataset_size\n" ]
[ [ "numpy.random.uniform", "numpy.any", "numpy.asarray", "numpy.arange", "numpy.stack", "numpy.concatenate", "numpy.array" ] ]
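The record above captures an ssd_keras-style `DataGenerator`. For context, here is a minimal usage sketch of how such a generator is driven end to end. The module name `object_detection_2d_data_generator`, and all file paths and CSV layout below, are assumptions for illustration, not part of the record:

```python
# Minimal usage sketch for the DataGenerator captured above (hypothetical
# module name and paths; the API matches the methods shown in `code`).
from object_detection_2d_data_generator import DataGenerator

dataset = DataGenerator(load_images_into_memory=False,
                        labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'))

# Parse a CSV annotations file; `input_format` tells the parser which of the
# first six columns holds which field.
dataset.parse_csv(images_dir='images/',
                  labels_filename='labels.csv',
                  input_format=['image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'],
                  include_classes='all')

# Optional one-time conversion; later runs can load 'dataset.h5' directly.
dataset.create_hdf5_dataset(file_path='dataset.h5', resize=False)

# `generate()` yields indefinitely, in the fixed output order documented in
# its docstring: processed_images, then processed_labels, then filenames.
gen = dataset.generate(batch_size=8,
                       shuffle=True,
                       transformations=[],
                       label_encoder=None,
                       returns={'processed_images', 'processed_labels', 'filenames'},
                       keep_images_without_gt=False)

batch_images, batch_labels, batch_filenames = next(gen)
print(batch_images.shape, len(batch_labels), batch_filenames[0])
```

Note that `shuffle=True` should stay on for training, per the generator's own docstring, and that batching a list of variably-sized images into `batch_images` will fail until all images share one shape.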
doaa-altarawy/ml_models_deploy
[ "d39f07887e75aedb0f3530934f0b61afe3fabbac" ]
[ "models/qc_time_estimator/qc_time_estimator/predict.py" ]
[ "import pandas as pd\nimport numpy as np\nfrom qc_time_estimator.config import config\nfrom qc_time_estimator.processing.data_management import load_pipeline\nfrom qc_time_estimator.processing.validation import validate_inputs\nfrom qc_time_estimator.metrics import mape, percentile_rel_90\nfrom qc_time_estimator import __version__ as _version\nimport logging\nfrom typing import Union, List\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_prediction(*, input_data: Union[pd.DataFrame, List[dict]]) -> dict:\n \"\"\"Make a prediction using a saved model pipeline.\n Throws an exception for invalid input.\n\n Parameters\n ----------\n input_data : DataFrame or list of dict\n Array of model prediction inputs.\n\n 1- Required input:\n cpu_clock_speed (in MHz, between 500 and 10,000)\n cpu_launch_year: (between 1990 and current year)\n\n driver: DriverEnum\n method: str\n\n 2- Required one of those two groups:\n\n molecule\n basis_set\n\n # OR\n nelec\n nmo\n\n 3- Optional:\n restricted: bool (default=False)\n nthreads: (default=1)\n\n Other extra fields are ignored and don't cause an error.\n\n\n Returns\n -------\n Dict with a list of predictions for each input row,\n as well as the model version.\n \"\"\"\n\n pipeline_file_name = f'{config.PIPELINE_SAVE_FILE}{_version}.pkl'\n _qc_time = load_pipeline(file_name=pipeline_file_name)\n\n data = pd.DataFrame(input_data)\n validated_data = validate_inputs(input_data=data)\n\n prediction = _qc_time.predict(validated_data)\n\n results = {'predictions': prediction, 'version': _version}\n\n logger.info(\n f'Making predictions with model version: {_version} \\n'\n f'Original Input data: {data.to_dict(\"records\")} \\n'\n f'Validated Inputs: {validated_data.to_dict(\"records\")} \\n'\n f'Predictions: {results}')\n\n return results\n\ndef get_accuracy(model, X, y):\n \"\"\"Calculate the prediction accuracy (MAPE) and the 90th percentile of the\n relative error for the given data using the given model.\"\"\"\n\n pred = model.predict(X)\n mape_score = mape(y, pred)\n percentile_90 = percentile_rel_90(y, pred) # Renamed from `percentile_99`: the metric is the 90th percentile.\n\n return mape_score, percentile_90" ]
[ [ "pandas.DataFrame" ] ]
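The file above imports `mape` and `percentile_rel_90` from `qc_time_estimator.metrics`, which is not part of this record. A plausible minimal sketch of those two metrics follows, under the assumption that they compute the mean absolute percentage error and the 90th percentile of the absolute relative error; the definitions are inferred from the names, not from the project's source:

```python
import numpy as np

def mape(y_true, y_pred):
    """Mean absolute percentage error, in percent (assumed definition)."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.mean(np.abs((y_true - y_pred) / y_true)) * 100.0)

def percentile_rel_90(y_true, y_pred):
    """90th percentile of the absolute relative error (assumed definition)."""
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    return float(np.percentile(np.abs((y_true - y_pred) / y_true), 90))

# Tiny self-check with made-up numbers:
y, p = [10.0, 20.0, 40.0], [11.0, 19.0, 42.0]
print(mape(y, p))               # ~6.67 (percent)
print(percentile_rel_90(y, p))  # ~0.09
```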
daanknoors/synthetic_data_generation
[ "5a0d1818cba2bc8b629869773a2f86a156d25fd9" ]
[ "synthesis/evaluation/evaluator.py" ]
[ "\"\"\"\nUtility evaluator. Comparing a reference dataset to 1 or more target datasets.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nimport synthesis.evaluation.metrics as metrics\nfrom synthesis.evaluation._base import BaseMetric, COLOR_PALETTE\n\n\n\nDEFAULT_METRICS = {\n 'average_js_distance': metrics.MarginalComparison(),\n 'pairwise_correlation_distance': metrics.AssociationsComparison()\n}\n\nclass SyntheticDataEvaluator(BaseMetric):\n \"\"\"Class to compare synthetic data to the original\"\"\"\n def __init__(self, metrics=None):\n \"\"\"Choose which metrics to compute\"\"\"\n self.metrics = metrics\n\n def fit(self, data_original, data_synthetic):\n self._check_input_args()\n data_original, data_synthetic = self._check_input_data(data_original, data_synthetic)\n\n for name, metric in self.metrics.items():\n metric.fit(data_original, data_synthetic)\n return self\n\n def score(self):\n scores = {}\n for name, metric in self.metrics.items():\n scores[name] = metric.score()\n return scores\n\n def plot(self):\n for name, metric in self.metrics.items():\n metric.plot()\n\n def _check_input_args(self):\n if self.metrics is not None:\n for name, metric in self.metrics.items():\n if not isinstance(metric, BaseMetric):\n raise ValueError(\"Input metric {} should subclass synthesis.evaluation._base.BaseMetric\".format(metric))\n else:\n self.metrics = DEFAULT_METRICS\n\n\nclass OriginalDataEvaluator():\n \"\"\"Class to evaluate input dataframe\"\"\"\n def __init__(self, cardinality_threshold=50, rare_category_threshold=0.05):\n self.cardinality_threshold = cardinality_threshold\n self.rare_category_threshold = rare_category_threshold\n\n def fit(self, data):\n self.stats_ = {}\n self.stats_['columns_high_cardinality'] = self.get_high_cardinality_columns(data, self.cardinality_threshold)\n self.stats_['rare_column_categories'] = self.get_rare_column_categories(data, self.rare_category_threshold)\n return self\n\n def plot(self, data, normalize=True):\n column_names = data.columns\n fig, ax = plt.subplots(len(column_names), 1, figsize=(8, len(column_names) * 4))\n\n for idx, col in enumerate(column_names):\n column_value_counts = data[col].value_counts(normalize=normalize) # Count values of this column only; `data.value_counts()` would count unique whole rows.\n\n bar_position = np.arange(len(column_value_counts.values))\n bar_width = 0.5\n\n ax[idx].bar(x=bar_position, height=column_value_counts.values,\n color=COLOR_PALETTE[0], label='original', width=bar_width)\n\n ax[idx].set_xticks(bar_position + bar_width / 2)\n if len(column_value_counts.values) <= 20:\n ax[idx].set_xticklabels(column_value_counts.keys(), rotation=25)\n else:\n ax[idx].set_xticklabels('')\n\n title = r\"$\\bf{\" + col + \"}$\"\n ax[idx].set_title(title)\n if normalize:\n ax[idx].set_ylabel('Probability')\n else:\n ax[idx].set_ylabel('Count')\n\n ax[idx].legend()\n fig.tight_layout()\n plt.show()\n\n @staticmethod\n def get_high_cardinality_columns(data, threshold):\n \"\"\"Get features with more unique values than the specified threshold.\"\"\"\n return data.columns[data.nunique() > threshold].tolist()\n\n @staticmethod\n def get_rare_column_categories(data, threshold):\n \"\"\"Get rare categories per column\"\"\"\n rare_categories = {}\n for c in data.columns:\n rare_categories[c] = [k for k, v in data[c].value_counts(normalize=True).items() if v < threshold]\n return rare_categories\n\n" ]
[ [ "matplotlib.pyplot.show" ] ]
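The evaluator above delegates its 'average_js_distance' score to `metrics.MarginalComparison`, whose source is not part of this record. Below is a minimal sketch of what such a marginal comparison presumably computes, using SciPy's Jensen-Shannon distance over per-column value distributions; the function name and details are assumptions, not the project's actual implementation:

```python
import numpy as np
import pandas as pd
from scipy.spatial.distance import jensenshannon

def average_js_distance(data_original, data_synthetic):
    """Average Jensen-Shannon distance between the per-column (marginal)
    value distributions of two dataframes. A sketch only; the project's
    real logic lives in synthesis.evaluation.metrics.MarginalComparison."""
    distances = []
    for col in data_original.columns:
        p = data_original[col].value_counts(normalize=True)
        q = data_synthetic[col].value_counts(normalize=True)
        support = p.index.union(q.index)  # align both vectors on a shared category support
        p = p.reindex(support, fill_value=0).to_numpy()
        q = q.reindex(support, fill_value=0).to_numpy()
        distances.append(jensenshannon(p, q, base=2))
    return float(np.mean(distances))

original = pd.DataFrame({'color': ['red', 'red', 'blue', 'green']})
synthetic = pd.DataFrame({'color': ['red', 'blue', 'blue', 'green']})
print(average_js_distance(original, synthetic))  # 0.0 would mean identical marginals
```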
Lee-Gihun/Micronet_GSJ
[ "72289bb66507b6c3b4d14f2e5916dec718a1b198" ]
[ "AutoML_autoaug.py" ]
[ "# -*- coding: utf-8 -*-\nimport os\nos.environ['OMP_NUM_THREADS'] = '1'\nimport sys\nimport math\nimport random\nimport shutil\nimport pickle\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torchvision.models as models\nimport numpy as np\n\nfrom PIL import Image, ImageEnhance, ImageOps\n\nfrom hyperas import optim as hyperas_optim\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas.distributions import choice, uniform\nfrom hyperas.utils import eval_hyperopt_space\n\nfrom data_utils import *\nfrom train_tools import *\nfrom models import *\nfrom counting import *\n\ndef _logging():\n fpath = './results/AutoML/cifar100_autoaug_policy.log'\n logger = logging.getLogger('Autoaugment Policy')\n logger.setLevel(logging.DEBUG)\n if not logger.handlers:\n handler = logging.FileHandler(fpath)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\ndef _get_conf():\n with open('./tmp.pickle', 'rb') as f:\n conf_name = pickle.load(f)\n \n opt = ConfLoader(conf_name).opt\n \n return opt\n \ndef data():\n # it just for processing, meaningless\n dataloader = None\n dataset_size = None\n \n return dataloader, dataset_size\n\ndef create_model(dataloader, dataset_size):\n class SubPolicy():\n def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):\n ranges = {\n \"shearX\": np.linspace(0, 0.3, 10),\n \"shearY\": np.linspace(0, 0.3, 10),\n \"translateX\": np.linspace(0, 150 / 331, 10),\n \"translateY\": np.linspace(0, 150 / 331, 10),\n \"rotate\": np.linspace(0, 30, 10),\n \"color\": np.linspace(0.0, 0.9, 10),\n \"posterize\": np.round(np.linspace(8, 4, 10), 0).astype(np.int),\n \"solarize\": np.linspace(256, 0, 10),\n \"contrast\": np.linspace(0.0, 0.9, 10),\n \"sharpness\": np.linspace(0.0, 0.9, 10),\n \"brightness\": np.linspace(0.0, 0.9, 10),\n \"autocontrast\": [0] * 10,\n \"equalize\": [0] * 10,\n \"invert\": [0] * 10\n }\n\n # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand\n def rotate_with_fill(img, magnitude):\n rot = img.convert(\"RGBA\").rotate(magnitude)\n return Image.composite(rot, Image.new(\"RGBA\", rot.size, (128,) * 4), rot).convert(img.mode)\n\n func = {\n \"shearX\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),\n Image.BICUBIC, fillcolor=fillcolor),\n \"shearY\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),\n Image.BICUBIC, fillcolor=fillcolor),\n \"translateX\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),\n fillcolor=fillcolor),\n \"translateY\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),\n fillcolor=fillcolor),\n \"rotate\": lambda img, magnitude: rotate_with_fill(img, magnitude),\n # \"rotate\": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),\n \"color\": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),\n \"posterize\": lambda img, magnitude: ImageOps.posterize(img, magnitude),\n \"solarize\": lambda img, magnitude: 
ImageOps.solarize(img, magnitude),\n \"contrast\": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(\n 1 + magnitude * random.choice([-1, 1])),\n \"sharpness\": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(\n 1 + magnitude * random.choice([-1, 1])),\n \"brightness\": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(\n 1 + magnitude * random.choice([-1, 1])),\n \"autocontrast\": lambda img, magnitude: ImageOps.autocontrast(img),\n \"equalize\": lambda img, magnitude: ImageOps.equalize(img),\n \"invert\": lambda img, magnitude: ImageOps.invert(img)\n }\n\n # self.name = \"{}_{:.2f}_and_{}_{:.2f}\".format(\n # operation1, ranges[operation1][magnitude_idx1],\n # operation2, ranges[operation2][magnitude_idx2])\n self.p1 = p1\n self.operation1 = func[operation1]\n self.magnitude1 = ranges[operation1][magnitude_idx1]\n self.p2 = p2\n self.operation2 = func[operation2]\n self.magnitude2 = ranges[operation2][magnitude_idx2]\n\n\n def __call__(self, img):\n if random.random() < self.p1: img = self.operation1(img, self.magnitude1)\n if random.random() < self.p2: img = self.operation2(img, self.magnitude2)\n return img\n\n class Autoaug():\n def __init__(self, fillcolor=(128, 128, 128)):\n self.policies = [\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n 
SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", 
\"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", 
\"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 
7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor)\n ]\n \n def __call__(self, img):\n policy_idx = random.randint(0, len(self.policies) - 1)\n return self.policies[policy_idx](img)\n\n def __repr__(self):\n return 'AutoAugment CIFAR100 Policy'\n\n opt = _get_conf()\n logger = _logging()\n if os.path.isdir(opt.data.root):\n shutil.rmtree(opt.data.root)\n \n DATASETTER = {'cifar10': cifar_10_setter,\n 'cifar100': cifar_100_setter}\n \n CRITERION = {'mse': nn.MSELoss,\n 'cross_entropy': nn.CrossEntropyLoss,\n 'label_smoothing': LabelSmoothingLoss}\n\n OPTIMIZER = {'sgd': optim.SGD,\n 'adam': optim.Adam,\n 'adagrad': optim.Adagrad,\n 'rmsprop': optim.RMSprop,\n 'radam': RAdam}\n\n dataloaders, dataset_sizes = DATASETTER[opt.data.dataset](batch_size=opt.data.batch_size, \n valid_size=opt.data.valid_size,\n root=opt.data.root,\n fixed_valid=opt.data.fixed_valid,\n autoaugment=opt.data.autoaugment,\n aug_policy=Autoaug())\n \n avail_resource = opt.model.param.avail_resource\n resolution_coefficient = opt.model.param.resolution_coefficient\n resolution_coefficient = round(math.pow(resolution_coefficient, avail_resource), 2)\n\n blocks_args, global_params = efficientnet(blocks_args='default',\n activation=opt.model.param.activation,\n activation_param=opt.model.param.get('activation_param', {}),\n resolution_coefficient=resolution_coefficient,\n width_coefficient=opt.model.param.width_coefficient, \n depth_coefficient=opt.model.param.depth_coefficient, \n image_size=opt.model.param.image_size, \n num_classes=opt.model.param.num_classes)\n \n #meaningless = {{choice(['No', 'meaning'])}}\n model = EfficientNet(blocks_args, \n global_params)\n \n model.to(opt.trainhandler.device)\n \n criterion = CRITERION[opt.criterion.algo](**opt.criterion.param) if opt.criterion.get('param') else CRITERION[opt.criterion.algo]() \n\n optimizer = OPTIMIZER[opt.optimizer.algo](model.parameters(), **opt.optimizer.param) if opt.optimizer.get('param') else OPTIMIZER[opt.optimizer.algo](model.parameters())\n \n # if not use scheduler, you can skip in config json file\n if opt.scheduler.get('enabled', False):\n scheduler_type = 
lr_scheduler.MultiStepLR if opt.scheduler.type == 'multistep' else lr_scheduler.CosineAnnealingLR if opt.scheduler.type == 'cosine' else lr_scheduler.StepLR\n scheduler = scheduler_type(optimizer, **opt.scheduler.param)\n else:\n scheduler = None\n \n train_handler = TrainHandler(model, \n dataloaders, \n dataset_sizes, \n criterion, \n optimizer, \n scheduler, \n device=opt.trainhandler.device, \n path=opt.trainhandler.path,\n mixup=opt.trainhandler.mixup.enabled,\n alpha=opt.trainhandler.mixup.alpha,\n precision=opt.trainhandler.precision)\n \n train_handler.set_name(opt.trainhandler.name)\n \n train_losses, valid_losses, train_accs, valid_accs = train_handler.train_model(num_epochs=opt.trainhandler.train.num_epochs)\n \n _, valid_loss = sorted(valid_losses, key = lambda x: x[1])[0]\n _, valid_acc = sorted(valid_accs, key = lambda x: x[1], reverse=True)[0]\n \n logger.info('Validation accuracy : %.2f' % (valid_acc * 100))\n \n return {'loss': valid_loss, 'status': STATUS_OK, 'model': train_handler.model}\n \nif __name__ == '__main__':\n conf_name = sys.argv[1]\n with open('./tmp.pickle', 'wb') as f:\n pickle.dump(conf_name, f)\n \n fpath = './results/AutoML'\n if not os.path.isdir(fpath):\n os.makedirs(fpath)\n if os.path.isfile('./results/AutoML/cifar100_autoaug_policy.log'):\n os.remove('./results/AutoML/cifar100_autoaug_policy.log')\n \n opt = ConfLoader(conf_name).opt\n logger = _logging()\n \n DATASETTER = {'cifar10': cifar_10_setter,\n 'cifar100': cifar_100_setter}\n \n CRITERION = {'mse': nn.MSELoss,\n 'cross_entropy': nn.CrossEntropyLoss,\n 'label_smoothing': LabelSmoothingLoss}\n\n OPTIMIZER = {'sgd': optim.SGD,\n 'adam': optim.Adam,\n 'adagrad': optim.Adagrad,\n 'rmsprop': optim.RMSprop,\n 'radam': RAdam}\n \n trials = Trials()\n best_run, best_model, space = hyperas_optim.minimize(model=create_model,\n data=data,\n algo=tpe.suggest,\n functions=[_get_conf, _logging],\n max_evals=1,\n trials=trials,\n eval_space=True,\n return_space=True)\n \n logger.info('=' * 30)\n logger.info('Best performing model chosen hyper-parameters: %s' % best_run)\n logger.info('=' * 30)\n \n for t, trial in enumerate(trials):\n vals = trial.get('misc').get('vals')\n tmp = {}\n for k,v in list(vals.items()):\n tmp[k] = v[0]\n logger.info('Trial %d : %s' % (t, eval_hyperopt_space(space, tmp)))\n logger.info('=' * 30)\n \n os.remove('./tmp.pickle')" ]
[ [ "numpy.linspace" ] ]
aliwimo/uav_placement
[ "85cde62361dd2bbfd907033b6954998b3461b1ee" ]
[ "firefly/config_file.py" ]
[ "import numpy as np\r\n\r\nPOP_SIZE = 50\r\nMAX_GEN = 1\r\nDIM_SIZE = 3\r\nALPHA = 1.0\r\nBETA0 = 0.5\r\nGAMMA = 1.0\r\nBOUND = 1000\r\nUB = BOUND\r\nLB = -BOUND\r\nBUILDING = [20, 50, 200] #b1\r\n# BUILDING = [20, 50, 250] #b2\r\n# BUILDING = [20, 50, 300] #b3\r\n# BUILDING = [10, 50, 250] #b4\r\n# BUILDING = [30, 50, 250] #b5\r\n# BUILDING = [50, 50, 250] #b6\r\n\r\n\r\nLocation_Array = [0] * DIM_SIZE\r\nFirefly_List = [0] * POP_SIZE\r\nO_Firefly_List = [0] * POP_SIZE\r\nFitnesses = [0] * POP_SIZE\r\nBest = []\r\nUsers_Locations = np.loadtxt( 'users/UserLocations_20_50_200.dat' ) #u1\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_20_50_250.dat' ) #u2\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_20_50_300.dat' ) #u3\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_10_50_250.dat' ) #u4\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_30_50_250.dat' ) #u5\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_50_50_250.dat' ) #u6\r\n\r\n" ]
[ [ "numpy.loadtxt" ] ]
Megscammell/METOD-Algorithm
[ "7518145ec100599bddc880f5f52d28f9a3959108" ]
[ "src/metod_alg/objective_functions/calc_minimizer_sog.py" ]
[ "import numpy as np\nfrom numpy import linalg as LA\n\n\ndef calc_minimizer_sog(point, p, sigma_sq, store_x0, matrix_test, store_c):\n \"\"\"\n Finds the nearest local minimizer for point using the Sum of Gaussians\n function.\n\n Parameters\n ----------\n point : 1-D array with shape (d, )\n A point used to evaluate the function.\n p : integer\n Number of local minima.\n sigma_sq: float or integer\n Value of sigma squared.\n store_x0 : 2-D arrays with shape (p, d).\n matrix_test : 3-D arrays with shape (p, d, d).\n store_c : 3-D arrays with shape (p, ).\n\n Returns\n -------\n np.argmin(dist) : integer\n Position of the local minimizer which produces the\n smallest distance between point and all p local\n minimizers.\n \"\"\"\n dist = np.zeros((p))\n for i in range(p):\n dist[i] = LA.norm(point - store_x0[i])\n assert(np.min(dist) < 0.25)\n return np.argmin(dist)\n" ]
[ [ "numpy.argmin", "numpy.linalg.norm", "numpy.min", "numpy.zeros" ] ]
Giannos-G/scikit-learn_modified
[ "03df71bbea1bcb3423262b711191552420422cda" ]
[ "examples/applications/plot_out_of_core_classification.py" ]
[ "\"\"\"\n======================================================\nOut-of-core classification of text documents\n======================================================\n\nThis is an example showing how scikit-learn can be used for classification\nusing an out-of-core approach: learning from data that doesn't fit into main\nmemory. We make use of an online classifier, i.e., one that supports the\npartial_fit method, that will be fed with batches of examples. To guarantee\nthat the features space remains the same over time we leverage a\nHashingVectorizer that will project each example into the same feature space.\nThis is especially useful in the case of text classification where new\nfeatures (words) may appear in each batch.\n\"\"\"\n\n# Authors: Eustache Diemert <[email protected]>\n# @FedericoV <https://github.com/FedericoV/>\n# License: BSD 3 clause\n\nfrom glob import glob\nimport itertools\nimport os.path\nimport re\nimport tarfile\nimport time\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\nfrom html.parser import HTMLParser\nfrom urllib.request import urlretrieve\nfrom sklearn.datasets import get_data_home\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.naive_bayes import MultinomialNB\n\n\ndef _not_in_sphinx():\n # Hack to detect whether we are running by the sphinx builder\n return '__file__' in globals()\n\n# %%\n# Reuters Dataset related routines\n# --------------------------------\n#\n# The dataset used in this example is Reuters-21578 as provided by the UCI ML\n# repository. It will be automatically downloaded and uncompressed on first\n# run.\n\n\n\nclass ReutersParser(HTMLParser):\n \"\"\"Utility class to parse a SGML file and yield documents one at a time.\"\"\"\n\n def __init__(self, encoding='latin-1'):\n HTMLParser.__init__(self)\n self._reset()\n self.encoding = encoding\n\n def handle_starttag(self, tag, attrs):\n method = 'start_' + tag\n getattr(self, method, lambda x: None)(attrs)\n\n def handle_endtag(self, tag):\n method = 'end_' + tag\n getattr(self, method, lambda: None)()\n\n def _reset(self):\n self.in_title = 0\n self.in_body = 0\n self.in_topics = 0\n self.in_topic_d = 0\n self.title = \"\"\n self.body = \"\"\n self.topics = []\n self.topic_d = \"\"\n\n def parse(self, fd):\n self.docs = []\n for chunk in fd:\n self.feed(chunk.decode(self.encoding))\n for doc in self.docs:\n yield doc\n self.docs = []\n self.close()\n\n def handle_data(self, data):\n if self.in_body:\n self.body += data\n elif self.in_title:\n self.title += data\n elif self.in_topic_d:\n self.topic_d += data\n\n def start_reuters(self, attributes):\n pass\n\n def end_reuters(self):\n self.body = re.sub(r'\\s+', r' ', self.body)\n self.docs.append({'title': self.title,\n 'body': self.body,\n 'topics': self.topics})\n self._reset()\n\n def start_title(self, attributes):\n self.in_title = 1\n\n def end_title(self):\n self.in_title = 0\n\n def start_body(self, attributes):\n self.in_body = 1\n\n def end_body(self):\n self.in_body = 0\n\n def start_topics(self, attributes):\n self.in_topics = 1\n\n def end_topics(self):\n self.in_topics = 0\n\n def start_d(self, attributes):\n self.in_topic_d = 1\n\n def end_d(self):\n self.in_topic_d = 0\n self.topics.append(self.topic_d)\n self.topic_d = \"\"\n\n\ndef stream_reuters_documents(data_path=None):\n \"\"\"Iterate 
over documents of the Reuters dataset.\n\n The Reuters archive will automatically be downloaded and uncompressed if\n the `data_path` directory does not exist.\n\n Documents are represented as dictionaries with 'body' (str),\n 'title' (str), 'topics' (list(str)) keys.\n\n \"\"\"\n\n DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'\n 'reuters21578-mld/reuters21578.tar.gz')\n ARCHIVE_FILENAME = 'reuters21578.tar.gz'\n\n if data_path is None:\n data_path = os.path.join(get_data_home(), \"reuters\")\n if not os.path.exists(data_path):\n \"\"\"Download the dataset.\"\"\"\n print(\"downloading dataset (once and for all) into %s\" %\n data_path)\n os.mkdir(data_path)\n\n def progress(blocknum, bs, size):\n total_sz_mb = '%.2f MB' % (size / 1e6)\n current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)\n if _not_in_sphinx():\n sys.stdout.write(\n '\\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb))\n\n archive_path = os.path.join(data_path, ARCHIVE_FILENAME)\n urlretrieve(DOWNLOAD_URL, filename=archive_path,\n reporthook=progress)\n if _not_in_sphinx():\n sys.stdout.write('\\r')\n print(\"untarring Reuters dataset...\")\n tarfile.open(archive_path, 'r:gz').extractall(data_path)\n print(\"done.\")\n\n parser = ReutersParser()\n for filename in glob(os.path.join(data_path, \"*.sgm\")):\n for doc in parser.parse(open(filename, 'rb')):\n yield doc\n\n\n# %%\n# Main\n# ----\n#\n# Create the vectorizer and limit the number of features to a reasonable\n# maximum\n\nvectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,\n alternate_sign=False)\n\n\n# Iterator over parsed Reuters SGML files.\ndata_stream = stream_reuters_documents()\n\n# We learn a binary classification between the \"acq\" class and all the others.\n# \"acq\" was chosen as it is more or less evenly distributed in the Reuters\n# files. 
For other datasets, one should take care of creating a test set with\n# a realistic portion of positive instances.\nall_classes = np.array([0, 1])\npositive_class = 'acq'\n\n# Here are some classifiers that support the `partial_fit` method\npartial_fit_classifiers = {\n 'SGD': SGDClassifier(max_iter=5),\n 'Perceptron': Perceptron(),\n 'NB Multinomial': MultinomialNB(alpha=0.01),\n 'Passive-Aggressive': PassiveAggressiveClassifier(),\n}\n\n\ndef get_minibatch(doc_iter, size, pos_class=positive_class):\n \"\"\"Extract a minibatch of examples, return a tuple X_text, y.\n\n Note: size is before excluding invalid docs with no topics assigned.\n\n \"\"\"\n data = [('{title}\\n\\n{body}'.format(**doc), pos_class in doc['topics'])\n for doc in itertools.islice(doc_iter, size)\n if doc['topics']]\n if not len(data):\n return np.asarray([], dtype=int), np.asarray([], dtype=int)\n X_text, y = zip(*data)\n return X_text, np.asarray(y, dtype=int)\n\n\ndef iter_minibatches(doc_iter, minibatch_size):\n \"\"\"Generator of minibatches.\"\"\"\n X_text, y = get_minibatch(doc_iter, minibatch_size)\n while len(X_text):\n yield X_text, y\n X_text, y = get_minibatch(doc_iter, minibatch_size)\n\n\n# test data statistics\ntest_stats = {'n_test': 0, 'n_test_pos': 0}\n\n# First we hold out a number of examples to estimate accuracy\nn_test_documents = 1000\ntick = time.time()\nX_test_text, y_test = get_minibatch(data_stream, 1000)\nparsing_time = time.time() - tick\ntick = time.time()\nX_test = vectorizer.transform(X_test_text)\nvectorizing_time = time.time() - tick\ntest_stats['n_test'] += len(y_test)\ntest_stats['n_test_pos'] += sum(y_test)\nprint(\"Test set is %d documents (%d positive)\" % (len(y_test), sum(y_test)))\n\n\ndef progress(cls_name, stats):\n \"\"\"Report progress information, return a string.\"\"\"\n duration = time.time() - stats['t0']\n s = \"%20s classifier : \\t\" % cls_name\n s += \"%(n_train)6d train docs (%(n_train_pos)6d positive) \" % stats\n s += \"%(n_test)6d test docs (%(n_test_pos)6d positive) \" % test_stats\n s += \"accuracy: %(accuracy).3f \" % stats\n s += \"in %.2fs (%5d docs/s)\" % (duration, stats['n_train'] / duration)\n return s\n\n\ncls_stats = {}\n\nfor cls_name in partial_fit_classifiers:\n stats = {'n_train': 0, 'n_train_pos': 0,\n 'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),\n 'runtime_history': [(0, 0)], 'total_fit_time': 0.0}\n cls_stats[cls_name] = stats\n\nget_minibatch(data_stream, n_test_documents)\n# Discard test set\n\n# We will feed the classifier with mini-batches of 1000 documents; this means\n# we have at most 1000 docs in memory at any time. 
The smaller the document\n# batch, the bigger the relative overhead of the partial fit methods.\nminibatch_size = 1000\n\n# Create the data_stream that parses Reuters SGML files and iterates on\n# documents as a stream.\nminibatch_iterators = iter_minibatches(data_stream, minibatch_size)\ntotal_vect_time = 0.0\n\n# Main loop : iterate on mini-batches of examples\nfor i, (X_train_text, y_train) in enumerate(minibatch_iterators):\n\n tick = time.time()\n X_train = vectorizer.transform(X_train_text)\n total_vect_time += time.time() - tick\n\n for cls_name, cls in partial_fit_classifiers.items():\n tick = time.time()\n # update estimator with examples in the current mini-batch\n cls.partial_fit(X_train, y_train, classes=all_classes)\n\n # accumulate test accuracy stats\n cls_stats[cls_name]['total_fit_time'] += time.time() - tick\n cls_stats[cls_name]['n_train'] += X_train.shape[0]\n cls_stats[cls_name]['n_train_pos'] += sum(y_train)\n tick = time.time()\n cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)\n cls_stats[cls_name]['prediction_time'] = time.time() - tick\n acc_history = (cls_stats[cls_name]['accuracy'],\n cls_stats[cls_name]['n_train'])\n cls_stats[cls_name]['accuracy_history'].append(acc_history)\n run_history = (cls_stats[cls_name]['accuracy'],\n total_vect_time + cls_stats[cls_name]['total_fit_time'])\n cls_stats[cls_name]['runtime_history'].append(run_history)\n\n if i % 3 == 0:\n print(progress(cls_name, cls_stats[cls_name]))\n if i % 3 == 0:\n print('\\n')\n\n\n# %%\n# Plot results\n# ------------\n#\n# The plot represents the learning curve of the classifier: the evolution\n# of classification accuracy over the course of the mini-batches. Accuracy is\n# measured on the first 1000 samples, held out as a validation set.\n#\n# To limit the memory consumption, we queue examples up to a fixed amount\n# before feeding them to the learner.\n\n\ndef plot_accuracy(x, y, x_legend):\n \"\"\"Plot accuracy as a function of x.\"\"\"\n x = np.array(x)\n y = np.array(y)\n plt.title('Classification accuracy as a function of %s' % x_legend)\n plt.xlabel('%s' % x_legend)\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.plot(x, y)\n\n\nrcParams['legend.fontsize'] = 10\ncls_names = list(sorted(cls_stats.keys()))\n\n# Plot accuracy evolution\nplt.figure()\nfor _, stats in sorted(cls_stats.items()):\n # Plot accuracy evolution with #examples\n accuracy, n_examples = zip(*stats['accuracy_history'])\n plot_accuracy(n_examples, accuracy, \"training examples (#)\")\n ax = plt.gca()\n ax.set_ylim((0.8, 1))\nplt.legend(cls_names, loc='best')\n\nplt.figure()\nfor _, stats in sorted(cls_stats.items()):\n # Plot accuracy evolution with runtime\n accuracy, runtime = zip(*stats['runtime_history'])\n plot_accuracy(runtime, accuracy, 'runtime (s)')\n ax = plt.gca()\n ax.set_ylim((0.8, 1))\nplt.legend(cls_names, loc='best')\n\n# Plot fitting times\nplt.figure()\nfig = plt.gcf()\ncls_runtime = [stats['total_fit_time']\n for cls_name, stats in sorted(cls_stats.items())]\n\ncls_runtime.append(total_vect_time)\ncls_names.append('Vectorization')\nbar_colors = ['b', 'g', 'r', 'c', 'm', 'y']\n\nax = plt.subplot(111)\nrectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,\n color=bar_colors)\n\nax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))\nax.set_xticklabels(cls_names, fontsize=10)\nymax = max(cls_runtime) * 1.2\nax.set_ylim((0, ymax))\nax.set_ylabel('runtime (s)')\nax.set_title('Training Times')\n\n\ndef autolabel(rectangles):\n \"\"\"attach some text vi autolabel on 
rectangles.\"\"\"\n for rect in rectangles:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2.,\n 1.05 * height, '%.4f' % height,\n ha='center', va='bottom')\n plt.setp(plt.xticks()[1], rotation=30)\n\n\nautolabel(rectangles)\nplt.tight_layout()\n#plt.show()\n\n# Plot prediction times\nplt.figure()\ncls_runtime = []\ncls_names = list(sorted(cls_stats.keys()))\nfor cls_name, stats in sorted(cls_stats.items()):\n cls_runtime.append(stats['prediction_time'])\ncls_runtime.append(parsing_time)\ncls_names.append('Read/Parse\\n+Feat.Extr.')\ncls_runtime.append(vectorizing_time)\ncls_names.append('Hashing\\n+Vect.')\n\nax = plt.subplot(111)\nrectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,\n color=bar_colors)\n\nax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))\nax.set_xticklabels(cls_names, fontsize=8)\nplt.setp(plt.xticks()[1], rotation=30)\nymax = max(cls_runtime) * 1.2\nax.set_ylim((0, ymax))\nax.set_ylabel('runtime (s)')\nax.set_title('Prediction Times (%d instances)' % n_test_documents)\nautolabel(rectangles)\nplt.tight_layout()\n#plt.show()" ]
[ [ "matplotlib.pyplot.tight_layout", "numpy.asarray", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "matplotlib.pyplot.xticks", "sklearn.linear_model.SGDClassifier", "matplotlib.pyplot.figure", "matplotlib.pyplot.gcf", "matplotlib.pyplot.gca", "matplotlib.pyplot.title", "sklearn.linear_model.Perceptron", "sklearn.naive_bayes.MultinomialNB", "sklearn.linear_model.PassiveAggressiveClassifier", "sklearn.feature_extraction.text.HashingVectorizer", "matplotlib.pyplot.legend", "matplotlib.pyplot.grid", "matplotlib.pyplot.subplot", "sklearn.datasets.get_data_home", "numpy.array", "matplotlib.pyplot.xlabel" ] ]
bagustris/calfem-python
[ "f5946c7d822ec70d6420a36d197c41ad263d05a0" ]
[ "calfem/vis_mpl.py" ]
[ "# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.collections\nimport matplotlib.path as mpp\nimport matplotlib.patches as patches\nimport matplotlib as mpl\nimport matplotlib.tri as tri\n\ntry:\n from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nexcept:\n print(\"Could not import Matplotlib backends. Probarbly due to missing Qt.\")\n\nfrom numpy import sin, cos, pi\nfrom math import atan2\n\nimport logging as cflog\n\n\ndef error(msg):\n \"\"\"Log error message\"\"\"\n cflog.error(msg)\n\n\ndef info(msg):\n \"\"\"Log information message\"\"\"\n cflog.info(msg)\n\n\ndef figure_class():\n \"\"\"Return visvis Figure class.\"\"\"\n return None\n\n\nfigureClass = figure_class\n\ncfv_def_mappable = None\n\n\ndef set_mappable(mappable):\n global cfv_def_mappable\n cfv_def_mappable = mappable\n\n\ndef colorbar(**kwargs):\n \"\"\"Add a colorbar to current figure\"\"\"\n global cfv_def_mappable\n if cfv_def_mappable != None:\n cbar = plt.colorbar(mappable=cfv_def_mappable, ax=plt.gca(), **kwargs)\n cfv_def_mappable = None\n return cbar\n else:\n return plt.colorbar(**kwargs)\n\n\ndef figure(figure=None, show=True, fig_size=(4, 3)):\n \"\"\"Create a visvis figure with extras.\"\"\"\n f = None\n\n if figure == None:\n f = plt.figure(figsize=fig_size)\n else:\n try:\n f = plt.figure(figure)\n except:\n f = plt.figure(figsize=fig_size)\n\n return f\n\n\ndef figure_widget(fig, parent=None):\n widget = FigureCanvas(fig)\n if parent != None:\n widget.setParent(parent)\n toolbar = NavigationToolbar(widget, widget)\n return widget\n\n\ndef close_all():\n \"\"\"Close all visvis windows.\"\"\"\n plt.close('all')\n\n\ncloseAll = close_all\n\n\ndef clf():\n \"\"\"Clear visvis figure\"\"\"\n plt.clf()\n\n\ndef gca():\n \"\"\"Get current axis of the current visvis figure.\"\"\"\n return plt.gca()\n\n\ndef gcf():\n return plt.gcf()\n\n\ndef subplot(*args):\n \"\"\"Create a visvis subplot.\"\"\"\n return plt.subplot(*args)\n\n\ndef camera3d():\n \"\"\"Get visvis 3D camera.\"\"\"\n return None\n\n\ndef show_and_wait():\n \"\"\"Wait for plot to show\"\"\"\n plt.show()\n\n\nshowAndWait = show_and_wait\n\n\ndef show_and_wait_mpl():\n \"\"\"Wait for plot to show\"\"\"\n plt.show()\n\n\nshowAndWaitMpl = show_and_wait_mpl\n\n\ndef set_figure_dpi(dpi):\n mpl.rcParams['figure.dpi'] = dpi\n\n\ndef text(text, pos, angle=0, **kwargs):\n return plt.text(pos[0], pos[1], text, **kwargs)\n\n\nadd_text = text\naddText = text\nlabel = text\n\n\ndef ce2vf(coords, edof, dofs_per_node, el_type):\n '''Duplicate code. Extracts verts, faces and verticesPerFace from input.'''\n\n if np.shape(coords)[1] == 2:\n is_3d = False\n # pad with zeros to make 3D\n verts = np.hstack((coords, np.zeros([np.shape(coords)[0], 1])))\n elif np.shape(coords)[1] == 3:\n is_3d = True\n verts = coords\n else:\n raise ValueError('coords must be N-by-2 or N-by-3 array')\n\n if el_type in [2, 4]: # elements with triangular faces\n vertices_per_face = 3\n elif el_type in [3, 5, 16]: # elements with rectangular faces\n vertices_per_face = 4\n else: # [NOTE] This covers all element types available in CALFEM plus tetrahedrons. 
If more element types are added it is necessary to include them here and below.\n raise ValueError('element type not implemented')\n\n faces = (edof[:, 0::dofs_per_node]-1)/dofs_per_node\n # 'faces' here are actually lists of nodes in elements, not in faces necessarily if the elements are in 3D. This case is handled below.\n\n if el_type in [4, 5]: # if hexahedrons or tetrahedrons:\n if el_type == 5:\n G = np.array([[0, 3, 2, 1],\n [0, 1, 5, 4],\n [4, 5, 6, 7],\n [2, 6, 5, 1],\n [2, 3, 7, 6],\n [0, 4, 7, 3]]) # G is an array that is used to decomposes hexahedrons into its component faces.\n # The numbers are from the node orders (see p94 in the Gmsh manual) and each row makes one face.\n elif el_type == 4:\n G = np.array([[0, 1, 2],\n [0, 3, 2],\n [1, 3, 2],\n [0, 3, 1]]) # This G decomposes tetrahedrons into faces\n faces = np.vstack([faces[i, G] for i in range(faces.shape[0])])\n elif el_type == 16: # if 8-node-quads:\n # The first 4 nodes are the corners of the high order quad.\n faces = faces[:, 0:4]\n\n return verts, np.asarray(faces, dtype=int), vertices_per_face, is_3d\n\n\ndef draw_mesh(coords, edof, dofs_per_node, el_type, title=None, color=(0, 0, 0), face_color=(0.8, 0.8, 0.8), node_color=(0, 0, 0), filled=False, show_nodes=False):\n '''\n Draws wire mesh of model in 2D or 3D. Returns the Mesh object that represents\n the mesh.\n Args:\n coords:\n An N-by-2 or N-by-3 array. Row i contains the x,y,z coordinates of node i.\n edof:\n An E-by-L array. Element topology. (E is the number of elements and L is the number of dofs per element)\n dofs_per_nodes:\n Integer. Dofs per node.\n el_type:\n Integer. Element Type. See Gmsh manual for details. Usually 2 for triangles or 3 for quadrangles.\n axes:\n Matplotlib Axes. The Axes where the model will be drawn. If unspecified the current Axes will be used, or a new Axes will be created if none exist.\n axes_adjust:\n Boolean. True if the view should be changed to show the whole model. Default True.\n title:\n String. Changes title of the figure. Default \"Mesh\".\n color: \n 3-tuple or char. Color of the wire. Defaults to black (0,0,0). Can also be given as a character in 'rgbycmkw'.\n face_color:\n 3-tuple or char. Color of the faces. Defaults to white (1,1,1). Parameter filled must be True or faces will not be drawn at all.\n filled:\n Boolean. Faces will be drawn if True. Otherwise only the wire is drawn. Default False.\n '''\n\n verts, faces, vertices_per_face, is_3d = ce2vf(\n coords, edof, dofs_per_node, el_type)\n\n y = verts[:, 0]\n z = verts[:, 1]\n\n values = np.zeros(faces.shape[0], float)\n\n def quatplot(y, z, quatrangles, values=[], ax=None, **kwargs):\n\n if not ax:\n ax = plt.gca()\n yz = np.c_[y, z]\n v = yz[quatrangles]\n if filled:\n pc = matplotlib.collections.PolyCollection(\n v, facecolor=face_color, **kwargs)\n else:\n pc = matplotlib.collections.PolyCollection(\n v, facecolor='none', **kwargs)\n\n ax.add_collection(pc)\n ax.autoscale()\n return pc\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n pc = quatplot(y, z, faces, values, ax=ax, edgecolor=color)\n\n if show_nodes:\n ax.plot(y, z, marker=\"o\", ls=\"\", color=node_color)\n\n if title != None:\n ax.set(title=title)\n\n\ndrawMesh = draw_mesh\n\n\ndef draw_element_values(values, coords, edof, dofs_per_node, el_type, displacements=None, draw_elements=True, draw_undisplaced_mesh=False, magnfac=1.0, title=None, color=(0, 0, 0), node_color=(0, 0, 0)):\n '''\n Draws scalar element values in 2D or 3D. \n\n Args:\n ev: \n An N-by-1 array or a list of scalars. 
The Scalar values of the elements. ev[i] should be the value of element i.\n \n coords:\n An N-by-2 or N-by-3 array. Row i contains the x,y,z coordinates of node i.\n\n edof:\n An E-by-L array. Element topology. (E is the number of elements and L is the number of dofs per element)\n\n dofs_per_node:\n Integer. Dofs per node.\n\n el_type: \n Integer. Element Type. See Gmsh manual for details. Usually 2 for triangles or 3 for quadrangles.\n \n displacements:\n An N-by-2 or N-by-3 array. Row i contains the x,y,z displacements of node i.\n \n draw_mesh:\n Boolean. True if mesh wire should be drawn. Default True.\n\n draw_undisplaced_mesh: \n Boolean. True if the wire of the undisplaced mesh should be drawn on top of the displaced mesh. Default False. Use only if displacements != None.\n\n magnfac: \n Float. Magnification factor. Displacements are multiplied by this value. Use this to make small displacements more visible.\n\n title: \n String. Changes title of the figure. Default \"Element Values\".\n '''\n\n if draw_undisplaced_mesh:\n draw_mesh(coords, edof, dofs_per_node, el_type, color=(0.5, 0.5, 0.5))\n\n if displacements is not None:\n if displacements.shape[1] != coords.shape[1]:\n displacements = np.reshape(displacements, (-1, coords.shape[1]))\n coords = np.asarray(coords + magnfac * displacements)\n\n verts, faces, vertices_per_face, is_3d = ce2vf(\n coords, edof, dofs_per_node, el_type)\n\n y = verts[:, 0]\n z = verts[:, 1]\n\n def quatplot(y, z, quatrangles, values=[], ax=None, **kwargs):\n\n if not ax:\n ax = plt.gca()\n yz = np.c_[y, z]\n v = yz[quatrangles]\n pc = matplotlib.collections.PolyCollection(\n v, **kwargs)\n\n pc.set_array(np.asarray(values))\n ax.add_collection(pc)\n ax.autoscale()\n return pc\n\n fig = plt.gcf()\n ax = plt.gca()\n ax.set_aspect('equal')\n\n if draw_elements:\n pc = quatplot(y, z, faces, values, ax=ax,\n edgecolor=color)\n else:\n pc = quatplot(y, z, faces, values, ax=ax,\n edgecolor=None)\n\n # pc = quatplot(y,z, np.asarray(edof-1), values, ax=ax,\n # edgecolor=\"crimson\", cmap=\"rainbow\")\n\n set_mappable(pc)\n\n if title != None:\n ax.set(title=title)\n\n\ndef draw_displacements(a, coords, edof, dofs_per_node, el_type, draw_undisplaced_mesh=False, magnfac=-1.0, magscale=0.25, title=None, color=(0, 0, 0), node_color=(0, 0, 0)):\n '''\n Draws scalar element values in 2D or 3D. Returns the world object\n elementsWobject that represents the mesh.\n\n Args:\n ev: \n An N-by-1 array or a list of scalars. The Scalar values of the elements. ev[i] should be the value of element i.\n coords: \n An N-by-2 or N-by-3 array. Row i contains the x,y,z coordinates of node i.\n edof: \n An E-by-L array. Element topology. (E is the number of elements and L is the number of dofs per element)\n dofs_per_node: \n Integer. Dofs per node.\n el_type: \n Integer. Element Type. See Gmsh manual for details. Usually 2 for triangles or 3 for quadrangles.\n displacements: \n An N-by-2 or N-by-3 array. Row i contains the x,y,z displacements of node i.\n axes: \n Matlotlib Axes. The Axes where the model will be drawn. If unspecified the current Axes will be used, or a new Axes will be created if none exist.\n draw_undisplaced_mesh:\n Boolean. True if the wire of the undisplaced mesh should be drawn on top of the displaced mesh. Default False. Use only if displacements != None.\n magnfac: \n Float. Magnification factor. Displacements are multiplied by this value. Use this to make small displacements more visible.\n title: \n String. Changes title of the figure. 
Default \"Element Values\".\n '''\n\n if draw_undisplaced_mesh:\n draw_mesh(coords, edof, dofs_per_node, el_type, color=(0.8, 0.8, 0.8))\n\n if a is not None:\n if a.shape[1] != coords.shape[1]:\n a = np.reshape(a, (-1, coords.shape[1]))\n\n x_max = np.max(coords[:, 0])\n x_min = np.min(coords[:, 0])\n\n y_max = np.max(coords[:, 1])\n y_min = np.min(coords[:, 1])\n\n x_size = x_max - x_min\n y_size = y_max - y_min\n\n if x_size > y_size:\n max_size = x_size\n else:\n max_size = y_size\n\n if magnfac < 0:\n magnfac = 0.25*max_size\n\n coords = np.asarray(coords + magnfac * a)\n\n verts, faces, vertices_per_face, is_3d = ce2vf(\n coords, edof, dofs_per_node, el_type)\n\n y = verts[:, 0]\n z = verts[:, 1]\n\n values = []\n\n def quatplot(y, z, quatrangles, values=[], ax=None, **kwargs):\n\n if not ax:\n ax = plt.gca()\n yz = np.c_[y, z]\n v = yz[quatrangles]\n pc = matplotlib.collections.PolyCollection(\n v, **kwargs)\n\n ax.add_collection(pc)\n ax.autoscale()\n return pc\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n pc = quatplot(y, z, faces, values, ax=ax, edgecolor=(\n 0.3, 0.3, 0.3), facecolor='none')\n\n if title != None:\n ax.set(title=title)\n\n\ndef create_ordered_polys(geom, N=10):\n \"\"\"Creates ordered polygons from the geometry definition\"\"\"\n\n N = 10\n\n o_polys = []\n\n for (id, (surf_name, curve_ids, holes, _, _, _)) in geom.surfaces.items():\n\n polygon = np.empty((0, 3), float)\n\n polys = []\n\n for curve_id in curve_ids:\n\n curve_name, curve_points, _, _, _, _ = geom.curves[curve_id]\n points = geom.get_point_coords(curve_points)\n\n if curve_name == \"Spline\":\n P = _catmullspline(points, N)\n if curve_name == \"BSpline\":\n P = _bspline(points, N)\n if curve_name == \"Circle\":\n P = _circleArc(*points, pointsOnCurve=N)\n if curve_name == \"Ellipse\":\n P = _ellipseArc(*points, pointsOnCurve=N)\n\n polys.append(P)\n\n ordered_polys = []\n\n ordered_polys.append(polys.pop())\n\n while len(polys) != 0:\n p0 = ordered_polys[-1]\n for p in polys:\n if np.allclose(p0[-1], p[0]):\n ordered_polys.append(polys.pop())\n break\n elif np.allclose(p0[-1], p[-1]):\n ordered_polys.append(np.flipud(polys.pop()))\n break\n\n for p in ordered_polys:\n polygon = np.concatenate((polygon, p))\n\n o_polys.append(polygon)\n\n return o_polys\n\n\ndef draw_ordered_polys(o_polys):\n\n for poly in o_polys:\n\n ax = plt.gca()\n path = mpp.Path(poly[:, 0:2])\n patch = patches.PathPatch(path, facecolor='orange', lw=1)\n ax.add_patch(patch)\n\n\ndef point_in_geometry(o_polys, point):\n\n for poly in o_polys:\n\n path = mpp.Path(poly[:, 0:2])\n inside = path.contains_points([point])\n\n if inside:\n return True\n\n return False\n\n\ndef topo_to_tri(edof):\n \"\"\"Converts 2d element topology to triangle topology to be used\n with the matplotlib functions tricontour and tripcolor.\"\"\"\n\n if edof.shape[1] == 3:\n return edof\n elif edof.shape[1] == 4:\n new_edof = np.zeros((edof.shape[0]*2, 3), int)\n new_edof[0::2, 0] = edof[:, 0]\n new_edof[0::2, 1] = edof[:, 1]\n new_edof[0::2, 2] = edof[:, 2]\n new_edof[1::2, 0] = edof[:, 2]\n new_edof[1::2, 1] = edof[:, 3]\n new_edof[1::2, 2] = edof[:, 0]\n return new_edof\n elif edof.shape[1] == 8:\n new_edof = np.zeros((edof.shape[0]*6, 3), int)\n new_edof[0::6, 0] = edof[:, 0]\n new_edof[0::6, 1] = edof[:, 4]\n new_edof[0::6, 2] = edof[:, 7]\n new_edof[1::6, 0] = edof[:, 4]\n new_edof[1::6, 1] = edof[:, 1]\n new_edof[1::6, 2] = edof[:, 5]\n new_edof[2::6, 0] = edof[:, 5]\n new_edof[2::6, 1] = edof[:, 2]\n new_edof[2::6, 2] = edof[:, 6]\n 
new_edof[3::6, 0] = edof[:, 6]\n new_edof[3::6, 1] = edof[:, 3]\n new_edof[3::6, 2] = edof[:, 7]\n new_edof[4::6, 0] = edof[:, 4]\n new_edof[4::6, 1] = edof[:, 6]\n new_edof[4::6, 2] = edof[:, 7]\n new_edof[5::6, 0] = edof[:, 4]\n new_edof[5::6, 1] = edof[:, 5]\n new_edof[5::6, 2] = edof[:, 6]\n return new_edof\n else:\n error(\"Element topology not supported.\")\n\n\ndef draw_nodal_values_contourf(values, coords, edof, levels=12, title=None, dofs_per_node=None, el_type=None, draw_elements=False):\n \"\"\"Draws element nodal values as filled contours. Element topologies\n supported are triangles, 4-node quads and 8-node quads.\"\"\"\n\n edof_tri = topo_to_tri(edof)\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n x, y = coords.T\n v = np.asarray(values)\n plt.tricontourf(x, y, edof_tri - 1, v.ravel(), levels)\n\n if draw_elements:\n if dofs_per_node != None and el_type != None:\n draw_mesh(coords, edof, dofs_per_node,\n el_type, color=(0.2, 0.2, 0.2))\n else:\n info(\"dofs_per_node and el_type must be specified to draw the mesh.\")\n\n if title != None:\n ax.set(title=title)\n\n\ndef draw_nodal_values_contour(values, coords, edof, levels=12, title=None, dofs_per_node=None, el_type=None, draw_elements=False):\n \"\"\"Draws element nodal values as filled contours. Element topologies\n supported are triangles, 4-node quads and 8-node quads.\"\"\"\n\n edof_tri = topo_to_tri(edof)\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n x, y = coords.T\n v = np.asarray(values)\n plt.tricontour(x, y, edof_tri - 1, v.ravel(), levels)\n\n if draw_elements:\n if dofs_per_node != None and el_type != None:\n draw_mesh(coords, edof, dofs_per_node,\n el_type, color=(0.2, 0.2, 0.2))\n else:\n info(\"dofs_per_node and el_type must be specified to draw the mesh.\")\n\n if title != None:\n ax.set(title=title)\n\n\ndef draw_nodal_values_shaded(values, coords, edof, title=None, dofs_per_node=None, el_type=None, draw_elements=False):\n \"\"\"Draws element nodal values as shaded triangles. Element topologies\n supported are triangles, 4-node quads and 8-node quads.\"\"\"\n\n edof_tri = topo_to_tri(edof)\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n x, y = coords.T\n v = np.asarray(values)\n plt.tripcolor(x, y, edof_tri - 1, v.ravel(), shading=\"gouraud\")\n\n if draw_elements:\n if dofs_per_node != None and el_type != None:\n draw_mesh(coords, edof, dofs_per_node,\n el_type, color=(0.2, 0.2, 0.2))\n else:\n info(\"dofs_per_node and el_type must be specified to draw the mesh.\")\n\n if title != None:\n ax.set(title=title)\n\n\ndraw_nodal_values = draw_nodal_values_contourf\n\n\ndef draw_geometry(geometry, draw_points=True, label_points=True, label_curves=True, title=None, font_size=11, N=20, rel_margin=0.05, draw_axis=False):\n '''\n Draws the geometry (points and curves) in geoData\n Args:\n geoData:\n GeoData object. Geodata contains geometric information of the model.\n axes:\n Matplotlib Axes. The Axes where the model will be drawn. If unspecified the current Axes will be used, or a new Axes will be created if none exist.\n axes_adjust:\n Boolean. If True the view will be changed to show the whole model. Default True.\n draw_points: \n Boolean. If True points will be drawn.\n label_points:\n Boolean. If True Points will be labeled. The format is: ID[marker]. If a point has marker==0 only the ID is written.\n label_curves:\n Boolean. If True Curves will be labeled. The format is: ID(elementsOnCurve)[marker].\n font_size:\n Integer. Size of the text in the text labels. Default 11.\n N:\n Integer. 
The number of discrete points per curve segment. Default 20. Increase for smoother curves. Decrease for better performance.\n rel_margin:\n Extra spacing between geometry and axis\n '''\n\n ax = plt.gca()\n ax.set_aspect('equal')\n ax.set_frame_on(draw_axis)\n\n if draw_points:\n P = np.array(geometry.getPointCoords()) # M-by-3 list of M points.\n #plotArgs = {'mc':'r', 'mw':5, 'lw':0, 'ms':'o', 'axesAdjust':False, 'axes':axes}\n plotArgs = {\"marker\": \"o\", \"ls\": \"\"}\n if geometry.is3D:\n plt.plot(P[:, 0], P[:, 1], P[:, 2], **plotArgs)\n else:\n plt.plot(P[:, 0], P[:, 1], **plotArgs)\n\n if label_points: # Write text label at the points:\n # [[x, y, z], elSize, marker]\n for (ID, (xyz, el_size, marker)) in geometry.points.items():\n text = \" \" + str(ID) + (\"[%s]\" %\n marker if marker is not 0 else '')\n plt.text(xyz[0], xyz[1], text,\n fontsize=font_size, color=(0.5, 0, 0.5))\n\n for(ID, (curveName, pointIDs, marker, elementsOnCurve, _, _)) in geometry.curves.items():\n points = geometry.getPointCoords(pointIDs)\n if curveName == \"Spline\":\n P = _catmullspline(points, N)\n if curveName == \"BSpline\":\n P = _bspline(points, N)\n if curveName == \"Circle\":\n P = _circleArc(*points, pointsOnCurve=N)\n if curveName == \"Ellipse\":\n P = _ellipseArc(*points, pointsOnCurve=N)\n # plotArgs = {'lc':'k', 'ms':None, 'axesAdjust':False, 'axes':axes} #Args for plot style. Black lines with no symbols at points.\n\n # Args for plot style. Black lines with no symbols at points.\n plotArgs = {\"color\": \"black\"}\n\n if geometry.is3D:\n plt.plot(P[:, 0], P[:, 1], P[:, 2], **plotArgs)\n else:\n plt.plot(P[:, 0], P[:, 1], **plotArgs)\n\n if label_curves:\n # Sort of midpoint along the curve. Where the text goes.\n midP = P[int(P.shape[0]*7.0/12), :].tolist()\n # Create the text for the curve. 
Includes ID, elementsOnCurve, and marker:\n text = \" \"+str(ID)\n text += \"(%s)\" % (elementsOnCurve) if elementsOnCurve is not None else ''\n # Something like \"4(5)[8]\"\n text += \"[%s]\" % (marker) if marker is not 0 else ''\n plt.text(midP[0], midP[1], text, fontsize=font_size)\n\n if title != None:\n plt.title(title)\n\n min_x, max_x, min_y, max_y = geometry.bounding_box_2d()\n\n g_width = max_x - min_x\n g_height = max_y - min_y\n\n if g_width > g_height:\n margin = rel_margin*g_width\n else:\n margin = rel_margin*g_height\n\n bottom, top = ax.get_ylim()\n left, right = ax.get_xlim()\n ax.set_ylim(bottom-margin, top+margin)\n ax.set_xlim(left-margin, right+margin)\n\n # if axesAdjust:\n # _adjustaxes(axes, geoData.is3D)\n #axes.daspectAuto = False\n #axes.daspect = (1,1,1)\n\n# drawGeometry = draw_geometry\n\n\ndef _catmullspline(controlPoints, pointsOnEachSegment=10):\n \"\"\"\n Returns points on a Catmull-Rom spline that interpolated the control points.\n Inital/end tangents are created by mirroring the second/second-to-last)\n control points in the first/last points.\n\n Params:\n controlPoints - Numpy array containing the control points of the spline.\n Each row should contain the x,y,(z) values.\n [[x1, y2],\n [x2, y2],\n ...\n [xn, yn]]\n\n pointsOnEachSegment - The number of points on each segment of the curve.\n If there are n control points and k samplesPerSegment,\n then there will be (n+1)*k numeric points on the curve.\n \"\"\"\n controlPoints = np.asarray(\n controlPoints) # Convert to array if input is a list.\n if (controlPoints[0, :] == controlPoints[-1, :]).all():\n # If the curve is closed we extend each opposite endpoint to the other side\n CPs = np.asmatrix(np.vstack((controlPoints[-2, :],\n controlPoints,\n controlPoints[1, :])))\n else: # Else make mirrored endpoints:\n CPs = np.asmatrix(np.vstack((2*controlPoints[0, :] - controlPoints[1, :],\n controlPoints,\n 2*controlPoints[-1, :] - controlPoints[-2, :])))\n M = 0.5 * np.matrix([[0, 2, 0, 0], [-1, 0, 1, 0],\n [2, -5, 4, -1], [-1, 3, -3, 1]])\n t = np.linspace(0, 1, pointsOnEachSegment)\n T = np.matrix([[1, s, pow(s, 2), pow(s, 3)] for s in t])\n return np.asarray(np.vstack([T * M * CPs[j-1:j+3, :] for j in range(1, len(CPs)-2)]))\n\n\ndef _bspline(controlPoints, pointsOnCurve=20):\n '''\n Uniform cubic B-spline.\n\n Params:\n controlPoints - Control points. Numpy array. 
One coordinate per row.\n pointsOnCurve - number of sub points per segment\n\n Mirrored start- and end-points are added if the curve is not closed.\n If the curve is closed some points are duplicated to make the closed\n spline continuous.\n (See http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-curve-closed.html)\n\n Based on descriptions on:\n http://www.siggraph.org/education/materials/HyperGraph/modeling/splines/b_spline.htm\n http://en.wikipedia.org/wiki/B-spline#Uniform_cubic_B-splines\n '''\n controlPoints = np.asarray(\n controlPoints) # Convert to array if input is a list.\n if (controlPoints[0, :] == controlPoints[-1, :]).all():\n # If the curve is closed we extend each opposite endpoint to the other side\n CPs = np.asmatrix(np.vstack((controlPoints[-2, :],\n controlPoints,\n controlPoints[1, :])))\n else: # Else make mirrored endpoints:\n CPs = np.asmatrix(np.vstack((2*controlPoints[0, :] - controlPoints[1, :],\n controlPoints,\n 2*controlPoints[-1, :] - controlPoints[-2, :])))\n M = (1.0/6) * np.matrix([[-1, 3, -3, 1],\n [3, -6, 3, 0],\n [-3, 0, 3, 0],\n [1, 4, 1, 0]])\n t = np.linspace(0, 1, pointsOnCurve)\n T = np.matrix([[pow(s, 3), pow(s, 2), s, 1] for s in t])\n\n return np.asarray(np.vstack([T * M * CPs[i-1: i+3, :] for i in range(1, len(CPs)-2)]))\n\n\ndef _circleArc(start, center, end, pointsOnCurve=20):\n return _ellipseArc(start, center, start, end, pointsOnCurve)\n\n\ndef _ellipseArc(start, center, majAxP, end, pointsOnCurve=20):\n '''Input are 3D 1-by-3 numpy arrays or vectors'''\n # First part is to find a similarity transform in 3D that transform the ellipse to\n # the XY-plane with the center at the origin and the major axis of the ellipse along the X-axis.\n\n # convert to arrays in case inputs are lists:\n start, center, majAxP, end, = np.asarray(start), np.asarray(\n center), np.asarray(majAxP), np.asarray(end)\n\n zPrim = np.cross(start-center, end-center)\n zPrim = zPrim / np.linalg.norm(zPrim)\n xPrim = (majAxP-center) / np.linalg.norm(majAxP-center)\n yPrim = np.cross(zPrim, xPrim)\n\n # Rotation matrix from ordinary coords to system where ellipse is in the XY-plane. (Actually hstack)\n R = np.vstack((xPrim, yPrim, zPrim)).T\n # Building Transformation matrix. -center is translation vector from ellipse center to origin.\n T = np.hstack((R, np.asmatrix(center).T))\n # Transformation matrix for homogenous coordinates.\n T = np.mat(np.vstack((T, [0, 0, 0, 1])))\n\n startHC = np.vstack((np.matrix(start).T, [1]))\n # start and end points as column vectors in homogenous coordinates\n endHC = np.vstack((np.matrix(end).T, [1]))\n\n s = np.linalg.inv(T) * startHC\n # start and end points in the new coordinate system\n e = np.linalg.inv(T) * endHC\n\n xs, ys = s[0, 0], s[1, 0]\n # Just extract x & y from the new start and endpoints\n xe, ye = e[0, 0], e[1, 0]\n\n a = np.sqrt((pow(ye*xs, 2) - pow(xe*ys, 2)) / (pow(ye, 2) - pow(ys, 2)))\n b = np.sqrt((pow(ye*xs, 2) - pow(xe*ys, 2)) / ((pow(ye, 2) - pow(ys, 2))\n * ((pow(xe, 2) - pow(xs, 2)) / (pow(ys, 2) - pow(ye, 2)))))\n\n # atan2 is a function that goes from -pi to pi. 
It gives the signed angle from the X-axis to the point (x, y).\n # We can't use the (transformed) start- and end-points directly; we divide x and y by the\n # ellipse major & minor axes to get the parameter t that corresponds to the point on the\n # ellipse (see the ellipse formula: x = a * cos(t), y = b * sin(t)).\n # So ts and te are the parameter values of the start- and end-points (in the transformed coordinate system).\n ts = atan2(ys/b, xs/a)\n te = atan2(ye/b, xe/a)\n\n if ts > te:\n # swap so that ts <= te in the parameter that goes around the ellipse.\n ts, te = te, ts\n if te - ts < np.pi:\n # parameter of the ellipse curve. NOT the angle to a point on the curve (like it could be for a circle).\n times = np.linspace(ts, te, pointsOnCurve)\n else:\n # The shortest parameter distance between start- and end-point straddles the discontinuity that jumps from pi to -pi.\n # number of points on the first arc:\n ps1 = round(pointsOnCurve * (pi-te)/(2*pi-te+ts))\n # number of points on the second arc:\n ps2 = round(pointsOnCurve * (ts+pi)/(2*pi-te+ts))\n times = np.concatenate(\n (np.linspace(te, pi, ps1), np.linspace(-pi, ts, ps2)))\n\n ellArc = np.array([[a*cos(t), b*sin(t)]\n for t in times]).T # points on arc (in 2D)\n # Make 3D homogeneous coords by adding rows of 0s and 1s.\n ellArc = np.vstack(\n (ellArc, np.repeat(np.matrix([[0], [1]]), ellArc.shape[1], 1)))\n ellArc = T * ellArc # Transform back to the original coordinate system\n return np.asarray(ellArc.T[:, 0:3]) # return points as an N-by-3 array.\n\n\ndef eldraw2(ex, ey, plotpar=[1, 2, 1], elnum=[]):\n \"\"\"\n eldraw2(ex,ey,plotpar,elnum)\n eldraw2(ex,ey,plotpar)\n eldraw2(ex,ey)\n\n PURPOSE\n Draw the undeformed 2D mesh for a number of elements of\n the same type. Supported elements are:\n\n 1) -> bar element 2) -> beam el.\n 3) -> triangular 3 node el. 4) -> quadrilateral 4 node el.\n 5) -> 8-node isopar. element\n\n INPUT\n ex,ey:.......... element coordinates, (nel x nen) arrays\n nen: number of element nodes\n nel: number of elements\n plotpar=[ linetype, linecolor, nodemark]\n\n linetype=1 -> solid linecolor=1 -> black\n 2 -> dashed 2 -> blue\n 3 -> dotted 3 -> magenta\n 4 -> red\n\n nodemark=1 -> circle\n 2 -> star\n 0 -> no mark\n\n elnum=edof(:,1) ; i.e. the first column in the topology matrix\n\n Rem.
Default is solid blue lines with circles at nodes.\n \"\"\"\n\n line_type = plotpar[0]\n line_color = plotpar[1]\n node_mark = plotpar[2]\n\n # Translate CALFEM plotpar codes to matplotlib format strings\n\n vv_line_type = '-'\n vv_line_color = 'b'\n vv_node_mark = 'o'\n\n if line_type == 1:\n vv_line_type = '-'\n elif line_type == 2:\n vv_line_type = '--'\n elif line_type == 3:\n vv_line_type = ':'\n\n if line_color == 1:\n vv_line_color = 'k'\n elif line_color == 2:\n vv_line_color = 'b'\n elif line_color == 3:\n vv_line_color = 'm'\n elif line_color == 4:\n vv_line_color = 'r'\n\n if node_mark == 1:\n vv_node_mark = 'o'\n elif node_mark == 2:\n vv_node_mark = 'x'\n elif node_mark == 0:\n vv_node_mark = ''\n\n vv_marker_color = vv_line_color\n\n plt.axis('equal')\n\n draw_element_numbers = False\n\n if len(elnum) == ex.shape[0]:\n draw_element_numbers = True\n\n i = 0\n\n for elx, ely in zip(ex, ey):\n x = elx.tolist()\n x.append(elx[0])\n y = ely.tolist()\n y.append(ely[0])\n\n xm = sum(x)/len(x)\n ym = sum(y)/len(y)\n\n plt.plot(x, y, vv_line_color + vv_node_mark + vv_line_type)\n\n # Label each element with its number at the element centroid.\n if draw_element_numbers:\n plt.text(xm, ym, str(elnum[i]))\n\n i += 1\n\n\ndef eliso2_mpl(ex, ey, ed):\n\n plt.axis('equal')\n\n print(np.shape(ex))\n print(np.shape(ey))\n print(np.shape(ed))\n\n gx = []\n gy = []\n gz = []\n\n # Flatten element coordinates and nodal values for the triangular contour plot.\n for elx, ely, scl in zip(ex, ey, ed):\n for x in elx:\n gx.append(x)\n for y in ely:\n gy.append(y)\n for z in scl:\n gz.append(z)\n\n plt.tricontour(gx, gy, gz, 5)\n" ]
[ [ "numpy.cross", "numpy.asarray", "numpy.asmatrix", "matplotlib.path.Path", "matplotlib.pyplot.plot", "numpy.vstack", "numpy.allclose", "matplotlib.pyplot.tricontour", "matplotlib.pyplot.gca", "matplotlib.pyplot.gcf", "matplotlib.pyplot.figure", "numpy.reshape", "numpy.cos", "matplotlib.pyplot.title", "matplotlib.pyplot.text", "matplotlib.patches.PathPatch", "matplotlib.backends.backend_qt5agg.NavigationToolbar2QT", "numpy.linspace", "numpy.zeros", "matplotlib.pyplot.axis", "matplotlib.pyplot.clf", "numpy.max", "numpy.min", "matplotlib.pyplot.close", "matplotlib.pyplot.colorbar", "numpy.linalg.norm", "matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg", "numpy.empty", "numpy.linalg.inv", "numpy.matrix", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.shape", "numpy.array", "numpy.sin", "numpy.concatenate" ] ]
g-parki/bokeh
[ "664ead5306bba64609e734d4105c8aa8cfb76d81" ]
[ "examples/plotting/file/slider.py" ]
[ "''' An interactive plot of the ``sin`` function. This example demonstrates\nadding widgets and ``CustomJS`` callbacks that can update a plot.\n\n.. bokeh-example-metadata::\n :apis: bokeh.plotting.Figure.line, bokeh.layouts.column, bokeh.layouts.row, bokeh.models.callbacks.CustomJS, bokeh.models.widgets.sliders.Slider\n :refs: :ref:`userguide_interaction_jscallbacks` > :ref:`userguide_interaction_jscallbacks_customjs`\n :keywords: javascript callback\n\n'''\nimport numpy as np\n\nfrom bokeh.layouts import column, row\nfrom bokeh.models import CustomJS, Slider\nfrom bokeh.plotting import ColumnDataSource, figure, show\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\nplot = figure(y_range=(-10, 10), width=400, height=400)\n\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\namp_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\")\nfreq_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\")\nphase_slider = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\")\noffset_slider = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\")\n\ncallback = CustomJS(args=dict(source=source, amp=amp_slider, freq=freq_slider, phase=phase_slider, offset=offset_slider),\n code=\"\"\"\n const data = source.data;\n const A = amp.value;\n const k = freq.value;\n const phi = phase.value;\n const B = offset.value;\n const x = data['x']\n const y = data['y']\n for (let i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.change.emit();\n\"\"\")\n\namp_slider.js_on_change('value', callback)\nfreq_slider.js_on_change('value', callback)\nphase_slider.js_on_change('value', callback)\noffset_slider.js_on_change('value', callback)\n\nlayout = row(\n plot,\n column(amp_slider, freq_slider, phase_slider, offset_slider),\n)\n\nshow(layout)\n" ]
[ [ "numpy.sin", "numpy.linspace" ] ]
KESHAmambo/IEEE_802_11ad_beamforming_simulation
[ "93328a41d9c044ee7596d02e360fb3b5f2250ec0" ]
[ "graph/ap (0, 0, 0), 12 sect, mob 4 sect/dynamic/degree variation, 3 slots, 25 stations/with 0/avg_dist.py" ]
[ "\"\"\"\nDistribution plot options\n=========================\n\n\"\"\"\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\navgArr0 = [684.0322959592726, 884.7363009861817, 888.8322884189091, 942.080300986182, 970.7522934458182, 991.2322959592727, 991.2323009861818, 1011.712300986182, 1036.288295959273, 1044.4803034996364, 1060.8642859054546, 1069.056306013091, 1077.2482984727276, 1122.304306013091, 1130.4963034996365, 1134.5923034996363, 1142.7843009861817, 1146.8802884189092, 1159.1682984727274, 1163.2643085265454, 1224.7042884189093, 1232.8963110400005, 1249.280295959273, 1261.5682934458184, 1265.6642833920002, 1282.0483060130912, 1290.2402984727278, 1298.4323110400003, 1331.2003034996367, 1351.680290932364, 1363.968290932364, 1363.9682959592733, 1409.0243009861824, 1409.024303499637, 1413.1202984727277, 1413.120306013091, 1429.5042884189097, 1429.5043060130913, 1470.464293445819, 1495.0402959592734, 1527.808285905455, 1556.4803034996369, 1593.3442934458185, 1613.8243009861826, 1634.3042859054556, 1638.400285905455, 1650.6882884189097, 1662.9763009861827, 1699.8403085265466, 1740.800303499637, 1757.1842959592736, 1757.1843009861827, 1761.2803160669096, 1781.7602909323643, 1794.0482959592732, 1794.0483034996373, 1802.2402909323644, 1810.432298472728, 1822.7202859054553, 1830.9122959592737, 1843.2003034996371, 1855.4882959592733, 1863.680295959274, 1867.7763009861828, 1892.3522909323647, 1908.736311040001, 1929.21628841891, 1982.4643135534557, 2002.944295959274, 2048.0002984727284, 2064.3842859054553, 2158.5922934458195, 2207.7442934458195, 2228.2242833920013, 2240.512300986183, 2269.1842884189105, 2285.568283392001, 2338.8162984727287, 2351.104295959274, 2416.6402984727292, 2424.832290932365, 2461.696298472729, 2478.080295959274, 2568.1922934458194, 2654.20829344582, 2715.648303499638, 2756.6083009861827, 2801.66429344582, 2936.8322909323656, 2990.0803160669107, 3043.3282984727293, 3117.0563210938194, 3125.2482959592744, 3170.304298472728, 3280.896278365093, 3379.200290932365, 3506.176295959273, 3850.240300986182, 3887.104285905455, 5201.920300986177]\navgArr1 = [262.14550912000004, 266.2412008029091, 270.33744209454545, 278.5297370065454, 278.529804032, 282.62589786763635, 286.72168338618184, 286.7217504116363, 286.72184424727277, 286.72195148800006, 286.72197829818185, 286.72203191854555, 286.7223670458182, 290.81752252509096, 290.81765657600005, 290.8178442472727, 290.81785765236367, 290.81824639999996, 294.9136699810909, 294.9136967912728, 294.91380403200003, 294.9138576523636, 294.91391127272726, 294.91393808290906, 294.91415256436363, 299.00956274036366, 299.0096699810909, 299.0097772218182, 299.0098710574546, 299.01011234909095, 299.01017937454543, 299.0102464, 299.0103268305455, 299.01039385600006, 303.1053348538182, 303.1056163607273, 303.1056833861818, 303.10569679127275, 303.1057101963637, 303.1058576523637, 303.10588446254553, 303.10589786763643, 303.1059112727273, 303.1060587287273, 303.10623299490913, 307.2014823098182, 307.20156274036367, 307.20166998109096, 307.20184424727273, 307.20213915927275, 307.2021659694545, 307.2021659694546, 307.20225980509093, 307.20235364072727, 307.2026753629091, 311.29757614545457, 311.29758955054547, 311.2976431709091, 311.29772360145455, 311.29780403200004, 311.2978710574546, 311.2978978676364, 311.2979112727273, 311.2979782981818, 311.29801851345456, 311.29825980509094, 311.2982732101819, 311.2986351476364, 315.3934152843637, 315.3934957149091, 315.3935225250909, 315.39369679127276, 315.39371019636366, 
315.39371019636366, 315.3937504116364, 315.39380403200005, 315.393951488, 315.3939514880001, 315.3939782981819, 315.39415256436365, 315.39484962909097, 319.4894689047273, 319.4895761454546, 319.48960295563637, 319.48961636072727, 319.489656576, 319.4897772218182, 319.4897906269091, 319.48981743709095, 319.48989786763644, 319.4900051083636, 319.49015256436377, 319.4905145018182, 319.4905547170909, 319.49078260363643, 319.4908094138182, 323.58536166400006, 323.5856163607273, 323.5856163607273, 323.58564317090907, 323.5856699810909, 323.5857638167273, 323.58580403200006, 323.5858174370909, 323.58585765236364, 323.5858576523637, 323.58589786763645, 323.5859380829091, 323.5860185134545, 323.5860721338182, 323.5860721338182, 323.58608553890906, 323.58612575418186, 323.58613915927276, 323.58613915927276, 323.58613915927276, 323.58625980509095, 323.5864340712728, 323.5865145018182, 323.5865681221818, 323.58679600872733, 327.6815493352728, 327.6816967912727, 327.68169679127277, 327.68179062690905, 327.68187105745454, 327.68192467781824, 327.6819380829092, 327.68196489309094, 327.6820319185455, 327.68204532363643, 327.68205872872727, 327.68205872872727, 327.68207213381817, 327.68208553890906, 327.68213915927277, 327.6821927796364, 327.68220618472725, 327.68228661527274, 327.68232683054543, 327.6823268305455, 327.6823536407273, 327.6823670458182, 327.6824742865455, 327.6824876916364, 327.6831579461819, 331.77730804363637, 331.77741528436366, 331.77741528436366, 331.77742868945455, 331.77757614545453, 331.77758955054543, 331.7776297658182, 331.77777722181816, 331.77791127272735, 331.77792467781813, 331.77797829818184, 331.77797829818184, 331.7779917032728, 331.7779917032728, 331.77800510836363, 331.7780453236363, 331.77805872872733, 331.7781525643636, 331.77819277963636, 331.7782061847273, 331.77842066618183, 331.7784340712727, 331.7784340712728, 331.7790909207274, 335.873656576, 335.873656576, 335.8736967912727, 335.87379062690917, 335.8738308421818, 335.87388446254545, 335.87388446254556, 335.8738978676364, 335.8739246778183, 335.87393808290915, 335.8739782981818, 335.8739917032728, 335.87400510836363, 335.8740587287273, 335.87416596945457, 335.8741793745454, 335.8741793745455, 335.87419277963636, 335.87421958981815, 335.87423299490916, 335.8743134254546, 335.8743402356364, 335.87440726109094, 335.87450109672733, 339.96957614545454, 339.969656576, 339.96966998109093, 339.9696833861819, 339.9696833861819, 339.9696967912727, 339.96975041163637, 339.9697772218182, 339.9697772218182, 339.96983084218186, 339.96984424727276, 339.96988446254545, 339.9699246778182, 339.969951488, 339.969951488, 339.9699782981818, 339.96999170327274, 339.97000510836364, 339.97001851345453, 339.9700587287273, 339.9700587287273, 339.97007213381823, 339.9701525643637, 339.9701659694546, 339.9703000203636, 344.0654823098182, 344.0656163607273, 344.06565657600004, 344.0657236014545, 344.0658308421818, 344.06587105745456, 344.0659380829091, 344.0659917032727, 344.06604532363633, 344.0661391592727, 344.0661927796364, 344.06628661527276, 344.06632683054545, 344.06643407127274, 344.06690324945464, 344.06718475636364, 348.1612946385454, 348.16176381672733, 348.1618576523636, 348.16187105745456, 348.16191127272725, 348.1619112727273, 348.1619112727273, 348.16192467781815, 348.1619917032728, 348.16200510836364, 348.1620319185455, 348.16207213381824, 348.1621257541819, 348.1621659694546, 348.1621793745455, 348.1622061847273, 348.16225980509097, 348.1623804509091, 348.162688768, 352.25761636072735, 352.2577236014546, 352.2577370065454, 
352.25788446254546, 352.2578978676364, 352.2579380829091, 352.2579380829091, 352.25801851345454, 352.25803191854544, 352.2580453236364, 352.25805872872735, 352.2580855389091, 352.25811234909094, 352.2581257541819, 352.2581659694546, 352.2581793745454, 352.25817937454553, 352.2582329949091, 352.2582464000001, 352.2582598050909, 352.2582866152727, 352.2583536407272, 352.2584072610909, 352.2586083374546, 352.258702173091, 352.25884962909106, 356.35349571490906, 356.35364317090904, 356.3537370065455, 356.35380403199997, 356.3538174370909, 356.3538710574545, 356.3538710574546, 356.35388446254547, 356.35393808290905, 356.3540051083637, 356.35401851345455, 356.3540721338182, 356.35411234909094, 356.3541793745455, 356.3542732101819, 356.35428661527277, 356.3543134254545, 356.354393856, 356.35444747636365, 356.3544876916364, 356.35450109672723, 356.3545279069091, 356.35452790690914, 356.35466195781817, 356.3552517818182, 360.4494554996363, 360.4495627403636, 360.4497370065455, 360.4497638167273, 360.44977722181824, 360.44988446254547, 360.44989786763637, 360.44995148800007, 360.45012575418184, 360.45013915927274, 360.4501659694546, 360.4501927796363, 360.4502464, 360.45032683054546, 360.4503268305455, 360.4503536407273, 360.450393856, 360.45043407127275, 360.45051450181825, 360.45051450181825, 360.45087643927286, 360.4510641105454, 360.4515332887273, 364.54554933527265, 364.5456699810909, 364.54568338618185, 364.5457370065454, 364.54573700654544, 364.5458174370908, 364.5458576523636, 364.54589786763637, 364.54597829818186, 364.54607213381814, 364.54616596945453, 364.54617937454555, 364.5462732101819, 364.5463134254545, 364.5463402356363, 364.5463670458182, 364.5463804509091, 364.54643407127276, 364.5464474763636, 364.5464742865455, 364.5465010967273, 364.5465547170909, 364.54660833745453, 364.54684962909096, 364.5469032494546, 368.64156274036367, 368.6416431709091, 368.6418174370909, 368.6419246778181, 368.64204532363635, 368.6420721338182, 368.64209894399994, 368.64211234909095, 368.64225980509093, 368.6422732101818, 368.6422866152727, 368.6423536407272, 368.6423536407273, 368.6423804509091, 368.6424340712727, 368.6424340712727, 368.64244747636366, 368.6426351476364, 368.6426619578182, 368.64267536290913, 368.64303730036363, 368.6433590225455, 372.7374957149091, 372.73772360145455, 372.73773700654544, 372.7377504116364, 372.7378844625455, 372.7379246778182, 372.73801851345456, 372.73811234909095, 372.73813915927275, 372.73823299490914, 372.7382866152727, 372.7385279069091, 372.73856812218185, 372.73862174254543, 372.73864855272734, 372.738688768, 372.7387960087272, 372.73891665454545, 376.83368338618175, 376.83377722181814, 376.8338710574546, 376.8339246778181, 376.83392467781823, 376.83396489309087, 376.8339782981819, 376.8339782981819, 376.83412575418185, 376.8342329949091, 376.83428661527273, 376.83436704581817, 376.8344072610909, 376.83447428654546, 376.8347289832728, 376.83487643927276, 376.8353054021819, 380.9296967912727, 380.92991127272734, 380.9299380829091, 380.9300453236364, 380.93013915927276, 380.93016596945455, 380.93021958981814, 380.93031342545453, 380.9303268305455, 380.9305547170909, 380.9307423883636, 380.93082281890906, 380.93119816145446, 385.02557614545447, 385.02576381672736, 385.02577722181815, 385.02588446254543, 385.02588446254543, 385.02612575418175, 385.0262464, 385.02638045090913, 385.02640726109087, 385.0264474763636, 385.0265815272727, 385.02667536290903, 385.0267960087273, 385.0269434647273, 389.1215761454546, 389.1217772218182, 389.1220185134545, 389.1228630341818, 
389.1233188072728, 393.2177772218181, 393.21777722181815, 393.21792467781813, 393.21792467781825, 393.21808553890907, 393.21891665454547, 397.3138442472728, 397.3139246778182, 397.3139782981819, 397.3139917032726, 397.31401851345464, 397.3140855389091, 397.314098944, 397.31419277963636, 397.3143268305455, 397.3144072610908, 397.3145547170908, 397.3146753629091, 397.31507751563635, 401.40975041163637, 401.40975041163637, 401.4098576523637, 401.40996489309094, 401.4100721338182, 401.41028661527275, 401.4103670458182, 401.4103804509091, 401.4105547170909, 401.4106083374546, 405.5054689047273, 405.50566998109093, 405.5059246778182, 405.50609894400003, 405.50627321018175, 405.5071177309091, 405.50729199709093, 409.60160295563634, 409.6025681221818, 409.6026083374545, 409.6028496290908, 413.69839385600005, 413.69887643927274, 417.79372360145453, 417.79440726109084, 421.8898308421817, 421.8904206661819, 425.9858174370908, 425.98608553890915, 430.0823134254546, 434.178688768, 438.2742061847272, 442.37005872872726, 446.4660319185454, 446.46613915927264, 446.4663134254545, 462.850232994909, 475.13820618472715, 479.2343000203636]\navgArr3 = [159.74953064727276, 159.7500266356364, 159.75250657745457, 159.7535253643637, 163.8445386705455, 163.8447129367273, 163.84507487418185, 163.84571831854552, 163.84864062836368, 163.84909640145463, 163.8494583389091, 163.85055755636367, 167.94163788800003, 167.94210706618185, 167.9431928785455, 167.94330011927275, 167.9443725265455, 167.94441274181824, 167.94531088290915, 167.9473082414546, 167.9500026647273, 167.95037800727277, 172.03707487418185, 172.03794620509095, 172.03813387636364, 172.03899180218184, 172.0390856378182, 172.040292096, 172.04031890618188, 172.04077467927277, 172.04124385745456, 172.04187389672734, 172.04312057018186, 172.0431741905455, 172.04322781090914, 172.0434959127273, 172.0440723316364, 172.04462194036364, 172.04608309527276, 176.13269953163638, 176.1335708625455, 176.1337585338182, 176.13471029527278, 176.13515266327278, 176.13523309381824, 176.13544757527276, 176.13614464000003, 176.13674786909098, 176.13775325090916, 176.13788730181818, 176.138061568, 176.1380749730909, 176.13836988509095, 176.13899992436365, 176.13899992436367, 176.13908035490914, 176.14080961163643, 176.14094366254548, 176.1420160698182, 176.14225736145454, 176.1424182225455, 176.1428471854545, 176.14304826181822, 176.1435442501819, 176.14471049309097, 180.22835099927272, 180.22943681163642, 180.22957086254547, 180.2298791796364, 180.23008025600004, 180.23013387636365, 180.23036176290913, 180.23048240872728, 180.23111244800006, 180.2311794734546, 180.23139395490915, 180.23143417018184, 180.2321312349091, 180.23237252654548, 180.23238593163637, 180.23332428800006, 180.2334181236364, 180.2336728203637, 180.23408837818184, 180.23438329018182, 180.23477203781823, 180.23482565818185, 180.23483906327277, 180.23506694981822, 180.23517419054548, 180.23545569745457, 180.23545569745457, 180.23579082472727, 180.23703749818185, 180.23715814400003, 180.24111264581822, 180.24152820363642, 184.32385501090914, 184.32453867054548, 184.325490432, 184.32549043200004, 184.32557086254548, 184.3255976727273, 184.3261338763637, 184.32641538327277, 184.32646900363642, 184.32649581381818, 184.32668348509094, 184.32708563781824, 184.32716606836365, 184.32731352436366, 184.32786313309094, 184.32787653818184, 184.3279435636364, 184.32803739927277, 184.3281044247273, 184.32814464000003, 184.32817145018188, 184.3282518807273, 184.328292096, 184.3283189061818, 184.32833231127276, 
184.32843955200002, 184.3286808436364, 184.3288148945455, 184.32897575563638, 184.32932428800004, 184.3296728203637, 184.32988730181825, 184.33074522763638, 184.33082565818185, 184.3312010007273, 184.33137526690913, 184.33205892654547, 184.33228681309092, 184.3323940538182, 184.33366753745457, 184.33380158836368, 184.33503485672728, 184.33559787054548, 184.33983387927273, 188.42017673309093, 188.42104806400005, 188.4211821149091, 188.42198642036365, 188.4220400407273, 188.42297839709096, 188.42304542254547, 188.42320628363643, 188.42338054981818, 188.42344757527277, 188.42362184145455, 188.42367546181825, 188.42375589236366, 188.4237827025455, 188.42383632290915, 188.4242116654546, 188.4244931723637, 188.42466743854547, 188.4249087301818, 188.4250829963637, 188.42509640145457, 188.42564601018185, 188.42586049163637, 188.42615540363636, 188.42639669527273, 188.42670501236367, 188.42721440581823, 188.42746910254547, 188.4274959127273, 188.4283538385455, 188.42835383854552, 188.42897047272726, 188.42919835927273, 188.43051205818188, 188.43161127563639, 188.43209385890913, 188.43289816436365, 192.51616332800003, 192.51671293672734, 192.51679336727275, 192.5171418996364, 192.51747702690912, 192.5178657745455, 192.5178657745455, 192.51791939490911, 192.51821430690913, 192.5184421934546, 192.51850921890917, 192.51850921890917, 192.51860305454545, 192.51939395490916, 192.5199301585455, 192.52001058909096, 192.52023847563635, 192.5202384756364, 192.5205333876364, 192.5205736029091, 192.520587008, 192.5206808436364, 192.52072105890915, 192.52086851490913, 192.52092213527274, 192.5214315287273, 192.5216728203637, 192.52206156800003, 192.52231626472735, 192.52298651927273, 192.52326802618182, 192.52365677381823, 192.52365677381823, 192.52403211636363, 192.52447448436368, 192.52448788945455, 192.52484982690913, 192.52795980800002, 192.52812066909092, 192.53021186327277, 196.6119890618182, 196.61229737890915, 196.61248505018185, 196.6129274181818, 196.61296763345453, 196.61350383709097, 196.61377193890917, 196.61383896436368, 196.61416068654546, 196.6143081425455, 196.61440197818186, 196.61446900363634, 196.6153269294546, 196.61544757527278, 196.6156620567273, 196.61567546181826, 196.6159703738182, 196.61609101963637, 196.61668084363635, 196.61676127418184, 196.61708299636368, 196.61727066763638, 196.6174047185455, 196.61776665600001, 196.6180347578182, 196.61808837818185, 196.61877203781825, 196.61894630400005, 196.61925462109096, 196.6196031534546, 196.61962996363636, 196.619683584, 196.61991147054547, 196.62042086400007, 196.62339679418187, 196.62456303709092, 200.70671557818187, 200.70715794618184, 200.7082571636364, 200.70883358254548, 200.70918211490908, 200.7094502167273, 200.71006685090913, 200.71010706618188, 200.71085775127275, 200.71123309381815, 200.71127330909093, 200.7113269294546, 200.71150119563637, 200.7115011956364, 200.71188994327272, 200.71219826036366, 200.71273446400002, 200.71292213527278, 200.7130427810909, 200.71321704727274, 200.7144369105455, 200.7145977716364, 200.71463798690914, 200.714651392, 200.71506694981818, 200.7159114705455, 200.71611254690913, 200.7162063825455, 200.7166889658182, 200.71756029672727, 200.72175609018186, 204.80346626327275, 204.8042705687273, 204.8047263418182, 204.8049944436364, 204.80641538327274, 204.80724649890917, 204.80763524654552, 204.80778270254544, 204.8082116654546, 204.80996773236362, 204.8101017832727, 204.810209024, 204.81036988509095, 204.81038329018185, 204.81061117672735, 204.81089268363633, 204.81220638254547, 204.81302409309095, 
204.81303749818187, 204.81329219490908, 204.81342624581828, 204.81382839854544, 204.8139758545455, 208.90033759418188, 208.90136978618182, 208.9019998254546, 208.9021472814546, 208.90256283927272, 208.9026566749091, 208.90267008000004, 208.90287115636372, 208.9032733090909, 208.90398377890912, 208.9041044247273, 208.9060749730909, 208.90649053090914, 208.90691949381824, 208.9072546210909, 208.9085281047273, 208.91110188218184, 212.99542604800007, 212.99610970763638, 212.99632418909093, 212.9963912145455, 212.9965922909091, 212.99660569600005, 212.99677996218182, 212.99696763345457, 212.99698103854553, 212.99706146909094, 212.99722233018184, 212.99723573527274, 212.99723573527277, 212.99761107781822, 212.99763788800007, 212.99824111709094, 212.99883094109092, 212.99904542254546, 212.99978270254545, 213.00015804509093, 213.00017145018185, 213.00039933672733, 213.00049317236363, 213.0005065774546, 213.00133769309093, 213.00143152872732, 213.00186049163642, 213.00188730181824, 213.0031607854546, 213.00341548218185, 213.0037506094546, 213.00439405381826, 213.00525197963637, 217.09126518690908, 217.09137242763637, 217.09151988363638, 217.0919890618182, 217.09210970763638, 217.09272634181823, 217.09357086254545, 217.0935976727273, 217.0940132305455, 217.09429473745456, 217.09475051054545, 217.09832966981818, 217.09862458181814, 217.0990133294546, 217.1058365207273, 221.18764052945463, 221.1884314298182, 221.18852526545456, 221.18860569600005, 221.18880677236368, 221.18919552000003, 221.18920892509092, 221.1894368116364, 221.18945021672732, 221.18982555927275, 221.19176929745456, 221.1949328989091, 221.19501332945455, 221.19509376000005, 225.28317135127276, 225.28388182109092, 225.28443142981823, 225.285048064, 225.28567810327277, 225.2867639156364, 225.28721968872733, 225.28811782981825, 225.28873446400004, 225.28916342690914, 225.29230021818188, 229.37842066618188, 229.3793188072727, 229.37976117527273, 229.37981479563638, 229.38024375854548, 229.38043142981826, 229.38071293672735, 229.38102125381826, 229.38145021672727, 229.38230814254553, 229.38273710545462, 229.38289796654553, 229.38358162618192, 229.38438593163642, 229.38449317236365, 229.38464062836366, 233.47510432581822, 233.47554669381825, 233.4758416058182, 233.4763509992728, 233.4773161658182, 233.47742340654548, 233.47766469818188, 233.4782411170909, 233.47857624436364, 233.47864326981824, 233.47899180218184, 233.47919287854546, 233.47928671418188, 233.47940736000004, 233.4816728203637, 233.48324121600007, 233.48647184290917, 237.57224375854545, 237.5724046196364, 237.57311508945463, 237.57345021672734, 237.57414728145457, 237.57475051054544, 237.57477732072726, 237.57491137163638, 237.5751794734546, 237.57590334836365, 241.66725178181824, 241.66825716363638, 241.6682571636364, 241.66869953163638, 241.67042878836364, 241.67167546181827, 241.6718899432728, 241.67336450327267, 241.6745173410909, 245.7634126429091, 245.7635466938182, 245.7645118603637, 245.76549043199998, 245.76605344581822, 245.76764865163636, 245.7682786909091, 249.85941264290912, 249.86016332800006, 249.86052526545456, 249.86094082327276, 249.8609542283637, 249.8611687098182, 249.86340736000002, 249.8661151883636, 249.8665709614546, 253.95542604799996, 253.9591794734546, 253.96553348654547, 258.05137242763635, 258.0516673396364, 262.1471043258182, 262.1472383767273, 262.1495842676364, 262.1515011956364, 266.244310784, 266.24467272145455, 266.2475414109091, 270.3394662632727, 270.3405252654546, 270.34075315200005, 270.34336714472727, 274.43581479563636, 
274.4368067723636, 278.5308898443637, 278.53145285818186, 282.6261525643637, 282.6286459112727, 286.7246191010909, 286.7248737978182, 290.8225092189091, 290.8246406283636, 299.0112651869091, 299.0121901381819, 303.10796225163637, 303.11103201745453, 303.11426264436363, 311.3006727214546, 327.68323837672733, 348.1659596101818]\navgArr5 = [122.88727330909094, 126.99153084509094, 135.17265931636368, 135.17791411200002, 135.1780481629091, 135.18060853527274, 135.18133241018185, 135.18613143272728, 135.1867480669091, 135.18723065018187, 135.2095769367273, 135.2254351592728, 139.27024111709096, 139.27369963054548, 139.28205100218184, 139.2851609832728, 139.2853218443637, 139.32260140218187, 139.32307058036366, 143.36506146909093, 143.36512849454547, 143.36616068654544, 143.36862722327277, 143.37016880872727, 143.3707318225455, 143.372125952, 143.37226000290912, 143.3749008058182, 143.37817164800003, 143.37939151127276, 143.38065158981823, 143.3907992436364, 143.39259552581822, 147.46077996218185, 147.4625896494546, 147.4636620567273, 147.46465403345456, 147.4646540334546, 147.46478808436368, 147.46567282036364, 147.4675227229091, 147.46941284072727, 147.47009650036367, 147.4701367156364, 147.47317967127276, 147.4749625483637, 147.47626284218182, 147.47655775418184, 147.4769465018182, 147.47706714763638, 147.47775080727274, 147.47791166836365, 147.4791315316364, 147.47949346909093, 147.47969454545458, 147.47977497600004, 147.48181254981822, 147.48734885236365, 147.48875638690907, 147.4919870138182, 147.49330071272732, 147.49458760145455, 147.5109954327273, 151.55514454109093, 151.5573161658182, 151.55753064727276, 151.55865667490912, 151.55956822109093, 151.55967546181822, 151.5599703738182, 151.56003739927274, 151.56097575563638, 151.56156557963638, 151.5621285934546, 151.5627318225455, 151.56299992436368, 151.56388466036367, 151.56392487563642, 151.5643538385455, 151.56444767418188, 151.56550667636367, 151.56554689163642, 151.5657077527273, 151.5660026647273, 151.56639141236366, 151.56735657890914, 151.56801342836366, 151.56806704872733, 151.5709491432728, 151.57304033745461, 151.57322800872734, 151.57517174690915, 151.57531920290913, 151.57594924218182, 151.5764854458182, 151.57652566109095, 151.5797294778182, 151.58687439127274, 151.58824171054547, 151.59039993018186, 151.5907618676364, 151.62749181672731, 151.63096373527273, 151.64434201600005, 155.65066195781822, 155.6513188072728, 155.65338319127275, 155.65468348509094, 155.6547907258182, 155.6548845614546, 155.65601058909095, 155.65614464, 155.65618485527276, 155.65621166545458, 155.65623847563637, 155.65698916072728, 155.65795432727276, 155.65887927854553, 155.65944229236365, 155.65950931781822, 155.65971039418184, 155.66055491490914, 155.6607425861819, 155.66078280145456, 155.66079620654548, 155.66149327127275, 155.66200266472734, 155.66231098181822, 155.66288740072733, 155.6644692014546, 155.6649651898182, 155.66605100218186, 155.66688211781823, 155.66706978909096, 155.66711000436368, 155.66890628654545, 155.6697910225455, 155.66999209890915, 155.67062213818184, 155.6709706705455, 155.67154708945458, 155.6716409250909, 155.67170795054548, 155.67272673745458, 155.6741074618182, 155.67453642472728, 155.6746972858182, 155.6755418065455, 155.67587693381822, 155.6771370123637, 155.67900032000003, 155.67916118109096, 155.68450981236364, 155.68701656436366, 155.6894428858182, 155.69512664436365, 155.69571646836366, 155.70232517818184, 155.72153467345456, 159.74784160581822, 159.7484180247273, 159.74844483490912, 159.7485252654546, 
159.7487665570909, 159.74922233018188, 159.7496915083637, 159.7508041309091, 159.75111244800004, 159.75119287854548, 159.75135373963641, 159.75156822109093, 159.75160843636363, 159.75257360290914, 159.75296235054552, 159.75376665600004, 159.75402135272728, 159.7540347578182, 159.75435648000004, 159.7547854429091, 159.75600530618186, 159.75605892654548, 159.7563672436364, 159.75671577600002, 159.7567962065455, 159.75768094254548, 159.7581367156364, 159.75820374109094, 159.7582171461818, 159.75835119709092, 159.75839141236364, 159.75851205818182, 159.7590214516364, 159.75923593309093, 159.75934317381822, 159.75941019927276, 159.759664896, 159.75981235200004, 159.76029493527275, 159.76101881018184, 159.7619571665455, 159.76261401600004, 159.7627346618182, 159.76280168727274, 159.76514757818185, 159.76530843927276, 159.76615296000003, 159.76632722618183, 159.76632722618183, 159.76634063127278, 159.76710472145456, 159.76809669818184, 159.76963828363637, 159.77069728581824, 159.77146137600002, 159.77197076945455, 159.77631401890912, 159.78150178909092, 159.7834455272728, 159.79100599854547, 159.79164944290912, 159.80555052218185, 159.80632801745458, 159.81101979927274, 159.81839259927276, 159.82126128872733, 159.82929093818188, 163.84290324945457, 163.84333221236366, 163.8439622516364, 163.8441365178182, 163.8442839738182, 163.84480677236365, 163.84507487418185, 163.84507487418185, 163.84510168436364, 163.84524914036368, 163.84549043200005, 163.84565129309095, 163.84569150836364, 163.8456915083637, 163.84612047127277, 163.84701861236368, 163.8470588276364, 163.84780951272734, 163.8478363229091, 163.84787653818185, 163.84831890618185, 163.84834571636367, 163.84851998254547, 163.8486272232727, 163.8488148945455, 163.84916342690914, 163.8494047185455, 163.8505843665455, 163.85085246836363, 163.8513484567273, 163.85258172509094, 163.85335922036367, 163.85475334981822, 163.85479356509094, 163.85495442618182, 163.85522252800004, 163.85530295854548, 163.8574209629091, 163.85764884945456, 163.85823867345454, 163.85849337018186, 163.85908319418186, 163.86113417309093, 163.8612950341818, 163.86221998545457, 163.86342644363634, 163.8642843694546, 163.86546401745457, 163.86557125818183, 163.8657053090909, 163.8665632349091, 163.86663026036368, 163.86687155200005, 163.87108075054547, 163.87274298181822, 163.87445883345455, 163.87945893236363, 163.8803570734546, 163.8808396567273, 163.88456627199997, 163.88503545018185, 163.88522312145454, 163.8960142196364, 163.9115373149091, 163.92691295418186, 167.93929199709092, 167.93930540218184, 167.94035099927274, 167.94079336727273, 167.94083358254545, 167.94108827927275, 167.9411418996364, 167.94147702690913, 167.9436084363637, 167.9439837789091, 167.94414464000005, 167.9446004130909, 167.94466743854548, 167.9447210589091, 167.94493554036364, 167.94498916072732, 167.94560579490908, 167.94636988509095, 167.94659777163642, 167.94673182254547, 167.94673182254547, 167.94768358400003, 167.94787125527276, 167.9482600029091, 167.94902409309097, 167.9501233105455, 167.95063270400004, 167.95112869236368, 167.95189278254549, 167.9526032523637, 167.95273730327278, 167.9528981643637, 167.95657115927276, 167.95925217745454, 167.96008329309095, 167.96037820509096, 167.96074014254546, 167.9621610821819, 167.96364904727278, 167.9638367185455, 167.9645739985455, 167.96582067200004, 167.9669332945455, 167.96706734545455, 167.9675365236364, 167.96929259054545, 167.97245619200004, 167.97885042036364, 167.97944024436364, 167.98515081309094, 167.98781842618183, 167.99146461090916, 
168.00708154181822, 168.0103255738182, 172.03431342545457, 172.03541264290914, 172.03668612654548, 172.03690060800002, 172.03696763345454, 172.03795961018187, 172.03838857309097, 172.03852262400002, 172.03919287854552, 172.03919287854552, 172.03983632290914, 172.03986313309096, 172.03993015854547, 172.0402652858182, 172.0406540334546, 172.04223583418184, 172.04337526690912, 172.04545305600004, 172.0488043287273, 172.04931372218184, 172.0495416087273, 172.04994376145459, 172.05015824290913, 172.0538982632728, 172.05478299927276, 172.05596264727276, 172.05809405672736, 172.0583487534546, 172.05934073018187, 172.06065442909096, 172.0626249774546, 172.06464914618184, 172.07540002909093, 172.08361734981818, 172.11956980363638, 176.13113113600005, 176.1312115665455, 176.13184160581818, 176.13188182109093, 176.1331687098182, 176.1335976727273, 176.1350588276364, 176.1350990429091, 176.1352464989091, 176.13530011927273, 176.1356888669091, 176.13669424872728, 176.13929483636366, 176.14282037527275, 176.14409385890914, 176.1467212567273, 176.1467882821819, 176.1487454254546, 176.14921460363638, 176.15212350836364, 176.15534073018182, 180.2269166545455, 180.227439453091, 180.22745285818186, 180.22860569600002, 180.22890060800003, 180.22943681163642, 180.2296915083637, 180.23065667490908, 180.23112585309093, 180.23154141090913, 180.23225188072732, 180.2334851490909, 180.2336862254546, 180.23485246836367, 180.2360857367273, 180.2372251694546, 180.24005364363643, 180.24919591563642, 180.25608613236363, 180.28203838836365, 184.32238045090912, 184.32401587200002, 184.32504806400004, 184.32658964945455, 184.3267907258182, 184.32744757527274, 184.32817145018183, 184.32822507054544, 184.32900256581823, 184.3291232116364, 184.32920364218188, 184.32932428800007, 184.33256832000004, 184.3363887709091, 188.41842066618185, 188.42017673309093, 188.42031078400007, 188.4208201774546, 188.4210078487273, 188.42108827927277, 188.42149043199998, 188.42395696872734, 188.42410442472735, 188.42563260509095, 188.4287828014546, 188.43204023854545, 188.44147742254552, 188.44725501672733, 192.51522497163637, 192.5152919970909, 192.5171553047273, 192.5171687098182, 192.51747702690915, 192.51749043200005, 192.51782555927278, 192.51904542254553, 192.52042614690913, 196.61196225163638, 196.61290060800005, 196.6144019781818, 196.61512585309094, 196.61617145018187, 196.61670765381822, 196.61744493381818, 196.62150667636368, 200.70670217309092, 200.70691665454547, 200.7090480640001, 200.70953064727274, 204.80270217309098, 204.80441802472728, 204.80482017745462, 204.80589258472727, 208.89907751563638, 208.90005608727276, 208.90161107781827, 212.99424640000004, 212.99506411054546, 212.9953188072728, 212.9955198836364, 212.9963644043637, 212.9970614690909, 217.09036704581823, 217.09048769163638, 217.09149307345461, 217.0955816261818, 217.09843691054547, 221.1907505105455, 225.28234023563644, 225.2841767330909, 225.28447164509092, 225.28566469818188, 229.37883622400005, 229.3835146007273, 237.57062174254548, 241.66789522618186, 249.86256283927273, 262.1457504116364, 270.3390775156364]\navgArr7 = [118.78773436509093, 118.78825716363637, 118.78853867054544, 126.97961371927276, 131.0759488465455, 131.07596225163638, 131.07598906181823, 131.07601587200003, 131.0760694923637, 131.07696763345456, 135.17160031418183, 135.17201587200003, 135.1721231127273, 135.17216332800004, 135.17239121454546, 135.1724180247273, 139.267573504, 139.2676941498182, 139.2678416058182, 139.26786841600003, 139.26797565672732, 139.26806949236365, 139.2680963025455, 
139.2681365178182, 139.26814992290912, 139.26819013818184, 139.26831078400002, 139.2686995316364, 139.26891401309095, 139.26894082327274, 143.36364052945456, 143.36365393454548, 143.36369414981823, 143.3637075549091, 143.36372096000002, 143.36373436509092, 143.36374777018185, 143.3638416058182, 143.3641365178182, 143.36424375854548, 143.36431078400003, 143.36443142981818, 147.45957350400005, 147.45980139054544, 147.45981479563636, 147.45990863127273, 147.46010970763638, 147.46020354327277, 147.46028397381818, 147.46031078400003, 147.46035099927275, 147.4604046196364, 147.46048505018186, 147.46049845527273, 147.46051186036365, 147.46064591127276, 147.4608469876364, 147.4610212538182, 147.461048064, 147.55354319127275, 151.55501049018184, 151.5552249716364, 151.55564052945456, 151.55564052945456, 151.55592203636368, 151.55602927709094, 151.55608289745456, 151.5561365178182, 151.55614992290913, 151.55621694836367, 151.55623035345457, 151.55631078400003, 151.55632418909096, 151.55633759418185, 151.5564314298182, 151.55644483490912, 151.55644483490914, 151.5565788858182, 151.55676655709092, 151.5568872029091, 155.65150647854546, 155.6515466938182, 155.65158690909092, 155.6517343650909, 155.6518282007273, 155.65185501090912, 155.6519756567273, 155.65202927709095, 155.65216332800003, 155.65224375854547, 155.65228397381821, 155.65237780945455, 155.6524180247273, 155.6524314298182, 155.65244483490912, 155.65265931636367, 155.6528872029091, 155.65306146909094, 155.65312849454548, 159.74719816145458, 159.74733221236366, 159.74734561745453, 159.7474394530909, 159.74750647854546, 159.7476539345455, 159.7477075549091, 159.74773436509093, 159.74778798545458, 159.74778798545458, 159.74781479563637, 159.7478147956364, 159.7478282007273, 159.74785501090912, 159.74797565672728, 159.74804268218185, 159.74804268218185, 159.74808289745457, 159.7480828974546, 159.74812311272728, 159.7481365178182, 159.7481767330909, 159.74835099927273, 159.74836440436366, 159.74840461963637, 159.74847164509092, 159.74847164509094, 159.74848505018187, 159.74860569600003, 159.74860569600003, 159.748753152, 163.84314454109096, 163.84326518690912, 163.8435198836364, 163.84353328872731, 163.8435600989091, 163.84361371927275, 163.84364052945458, 163.84370755490912, 163.84372095999998, 163.8437611752728, 163.8437745803637, 163.84380139054548, 163.8438550109091, 163.84389522618184, 163.84397565672728, 163.84405608727278, 163.84406949236364, 163.84409630254547, 163.8442571636364, 163.84429737890912, 163.84429737890912, 163.84436440436366, 163.8443912145455, 163.84447164509092, 163.84448505018182, 163.84452526545454, 163.8445520756364, 163.8445654807273, 163.84461910109093, 163.84463250618185, 163.8447129367273, 163.84473974690914, 163.84494082327274, 163.8451418996364, 167.93898367999998, 167.93901049018183, 167.9392249716364, 167.93947966836367, 167.9395332887273, 167.9395466938182, 167.93957350400004, 167.9396539345455, 167.9396941498182, 167.93974777018187, 167.93977458036363, 167.93985501090913, 167.93988182109092, 167.93988182109092, 167.940015872, 167.94005608727278, 167.9401767330909, 167.94019013818186, 167.94021694836366, 167.94027056872733, 167.94035099927274, 167.94047164509095, 167.94049845527275, 167.9405118603637, 167.94052526545457, 167.9405922909091, 167.94061910109096, 167.94068612654547, 167.94069953163637, 167.94076655709094, 167.94077996218184, 167.94077996218186, 167.94090060800005, 167.94132957090915, 172.03518475636366, 172.03549307345457, 172.03550647854544, 172.03562712436369, 172.03568074472733, 
172.03569414981823, 172.03572096, 172.03581479563638, 172.03582820072725, 172.0358282007273, 172.035868416, 172.03601587200004, 172.03606949236365, 172.03609630254547, 172.03609630254547, 172.0361231127273, 172.03619013818184, 172.0362035432727, 172.03621694836366, 172.03623035345458, 172.0362705687273, 172.03631078400002, 172.0363375941819, 172.03637780945456, 172.0364046196364, 172.0364180247273, 172.03647164509093, 172.03648505018188, 172.0365922909091, 172.03663250618186, 172.03665931636365, 172.03668612654548, 172.036753152, 172.0369542283637, 172.03712849454547, 172.03763788800003, 176.13088984436362, 176.13099708509094, 176.13111773090912, 176.13122497163636, 176.13131880727278, 176.13146626327273, 176.13160031418184, 176.13161371927276, 176.13161371927276, 176.13165393454543, 176.13172096000002, 176.1318147956364, 176.1318147956364, 176.132015872, 176.13201587200004, 176.13205608727276, 176.13208289745455, 176.13241802472731, 176.13249845527275, 176.13259229090912, 176.1327263418182, 176.1328469876364, 176.13295422836364, 176.13296763345454, 180.2268094138182, 180.22719816145457, 180.2273724276364, 180.22753328872727, 180.2276539345455, 180.22766733963638, 180.22766733963638, 180.22797565672732, 180.22804268218187, 180.22809630254548, 180.22816332800002, 180.22817673309092, 180.22817673309095, 180.22832418909093, 180.22839121454547, 180.22840461963634, 180.2284180247273, 180.2284180247273, 180.22843142981822, 180.22851186036365, 180.22852526545458, 180.2285788858182, 180.2286191010909, 180.2286861265455, 180.22872634181823, 180.22879336727274, 180.22886039272726, 180.22902125381822, 180.22916870981823, 180.22983896436367, 184.32290324945453, 184.32346626327273, 184.32351988363644, 184.32360031418182, 184.32370755490913, 184.32370755490916, 184.32378798545454, 184.32380139054544, 184.3238282007273, 184.32385501090914, 184.32389522618186, 184.3239086312728, 184.32397565672727, 184.32400246690915, 184.32404268218187, 184.32425716363642, 184.32433759418186, 184.32445824000007, 184.32447164509097, 184.32448505018183, 184.32449845527276, 184.32455207563638, 184.32459229090912, 184.32461910109095, 184.32468612654546, 184.32482017745457, 184.32486039272732, 184.32490060800006, 184.32511508945456, 184.32542340654544, 188.41918475636365, 188.4192115665455, 188.41954669381823, 188.41964052945457, 188.4196539345455, 188.41968074472732, 188.4197075549091, 188.41985501090912, 188.4199220363637, 188.4200963025455, 188.42013651781818, 188.4201499229091, 188.42023035345457, 188.42024375854547, 188.42027056872732, 188.4204046196364, 188.42060569600005, 188.42060569600005, 188.42064591127274, 188.4206459112728, 188.42076655709093, 188.42080677236368, 188.42087379781822, 188.42088720290914, 188.42092741818186, 188.4210882792728, 188.4212893556364, 188.42174512872734, 188.4393192029091, 192.51505070545457, 192.5150775156364, 192.51554669381818, 192.51585501090915, 192.51590863127274, 192.51610970763636, 192.5161097076364, 192.5161231127273, 192.51617673309093, 192.51620354327272, 192.5162571636364, 192.51629737890912, 192.51639121454548, 192.51665931636364, 192.5166727214546, 192.51682017745458, 192.51691401309094, 192.5169408232728, 192.5170212538182, 192.51738319127276, 196.61105070545463, 196.61139923781818, 196.61150647854546, 196.61165393454547, 196.61174777018184, 196.61177458036363, 196.61180139054545, 196.61188182109098, 196.61205608727275, 196.61265931636365, 196.6127397469091, 196.61275315200004, 196.61312849454546, 196.6132625454546, 196.6132759505455, 196.61355745745456, 200.706688768, 
200.70747966836362, 200.70776117527276, 200.70805608727278, 200.70825716363638, 200.70835099927274, 200.70839121454546, 200.70841802472728, 200.70845824000003, 200.70877996218184, 200.7088603927273, 204.8031177309091, 204.8032383767273, 204.80337242763636, 204.80361371927273, 204.80374777018184, 204.80380139054552, 204.80382820072728, 204.8038550109091, 204.80385501090913, 204.80388182109095, 204.80397565672732, 204.80432418909092, 204.80441802472734, 204.80467272145458, 208.89906411054545, 208.89953328872724, 208.89978798545457, 208.8999622516364, 208.90006949236366, 208.90008289745455, 208.9001097076364, 208.90021694836366, 208.90023035345456, 208.90028397381818, 208.90059229090912, 208.9006191010909, 208.90063250618178, 208.9006861265455, 208.90082017745462, 208.90108827927278, 208.9010882792728, 212.9953724276364, 212.995426048, 212.9954528581818, 212.99546626327276, 212.99547966836366, 212.99553328872727, 212.99560031418187, 212.9957075549091, 212.99586841600004, 212.99592203636365, 212.99594884654547, 212.99608289745458, 212.99619013818184, 212.99637780945457, 212.99644483490908, 212.9965654807273, 212.9966325061818, 212.99665931636366, 212.99667272145456, 212.9968603927273, 217.09103730036367, 217.09126518690908, 217.09137242763637, 217.09137242763643, 217.09177458036365, 217.09190863127276, 217.09235099927278, 217.09256548072725, 217.09260569600002, 217.09315530472733, 221.18719816145457, 221.18753328872728, 221.18762712436364, 221.187868416, 221.18875315200003, 225.2832517818182, 225.2832651869091, 225.2834930734546, 225.2838147956364, 225.2839622516364, 225.28398906181826, 225.28409630254544, 225.28447164509092, 225.28499444363638, 225.28519552, 229.37941264290913, 229.37997565672728, 229.38016332799998, 229.38068612654547, 233.4751713512727, 233.47522497163638, 233.47557350400007, 233.47560031418183, 233.47568074472724, 233.4756941498182, 233.47569414981822, 233.47570755490912, 233.47632418909095, 233.47680677236363, 237.57173436509095, 237.57219013818178, 241.6672115665455, 241.66820354327277, 241.66821694836364, 245.7648067723636, 253.95577458036362, 253.95586841600004, 253.95592203636363, 262.14796225163633, 262.14831078399993, 270.3395466938182, 270.3400024669091, 278.5319488465455, 294.91758426763636, 294.9177719389091]\navgArr9 = [98.3116754618182, 98.31239933672728, 98.31245295709091, 98.31306959127274, 98.3132840727273, 98.31380687127273, 98.31526802618184, 102.40797037381819, 102.40818485527275, 102.4082384756364, 102.40857360290913, 102.40865403345452, 102.40890873018184, 102.40900256581818, 102.40921704727272, 102.40955217454547, 102.40991411200002, 102.41061117672729, 106.50358162618184, 106.50363524654547, 106.50387653818181, 106.50401058909094, 106.50414464, 106.50415804509095, 106.50418485527275, 106.50427869090912, 106.5044529570909, 106.5046942487273, 106.50484170472726, 106.5048685149091, 106.50506959127273, 106.5051500218182, 106.5051500218182, 106.505471744, 106.5059007069091, 106.50614199854546, 110.59895158690911, 110.59931352436364, 110.59942076509091, 110.59982291781819, 110.5998229178182, 110.60015804509094, 110.60022507054548, 110.60046636218183, 110.6006004130909, 110.60069424872727, 110.60077467927273, 110.60081489454544, 110.60104278109091, 110.60112321163636, 110.60117683200001, 110.60182027636365, 114.69509904290909, 114.6951928785455, 114.69534033454548, 114.69551460072731, 114.69558162618186, 114.69562184145458, 114.69572908218183, 114.6958095127273, 114.6958095127273, 114.69584972800001, 114.69618485527273, 114.69621166545456, 
114.69627869090911, 114.69629209600002, 114.69630550109093, 114.6964261469091, 114.69647976727273, 114.69653338763638, 114.69674786909093, 114.69676127418182, 114.69696235054548, 114.69698916072728, 114.69704278109093, 114.69710980654547, 114.69728407272731, 114.69795432727274, 114.69930824145455, 118.79045559854546, 118.79073710545455, 118.79081753600002, 118.79115266327275, 118.79127330909094, 118.79131352436364, 118.79135373963636, 118.79142076509093, 118.79148779054547, 118.79166205672729, 118.7918229178182, 118.79194356363638, 118.79201058909092, 118.79201058909092, 118.79206420945455, 118.79226528581822, 118.79258700800001, 118.79265403345457, 118.79268084363639, 118.79277467927274, 118.79278808436364, 118.79278808436364, 118.79292213527275, 118.79293554036366, 118.79294894545455, 118.79296235054545, 118.79329747781821, 118.79341812363637, 122.88664326981821, 122.88673710545454, 122.88679072581822, 122.88680413090911, 122.88734033454548, 122.88735373963638, 122.88738054981819, 122.8875280058182, 122.88770227200004, 122.88778270254548, 122.88778270254548, 122.88779610763639, 122.88790334836364, 122.88793015854547, 122.88801058909094, 122.88813123490908, 122.88819826036367, 122.8882786909091, 122.88833231127273, 122.88843955200002, 122.88845295709096, 122.8885736029091, 122.88901597090911, 122.88917683200002, 122.88919023709096, 122.889766656, 122.89018221381819, 122.8906379869091, 126.98262986472727, 126.98273710545455, 126.9830588276364, 126.98330011927276, 126.98342076509093, 126.98363524654548, 126.98367546181821, 126.98370227200003, 126.98372908218184, 126.98372908218184, 126.98375589236365, 126.9837961076364, 126.98380951272729, 126.98388994327273, 126.9839435636364, 126.98402399418183, 126.98406420945456, 126.98409101963642, 126.98421166545457, 126.98433231127275, 126.98434571636366, 126.98438593163637, 126.98456019781821, 126.98460041309092, 126.98476127418185, 126.9848417047273, 126.9848551098182, 126.98501597090907, 126.985029376, 126.98502937600001, 126.9856594152727, 131.07734297600004, 131.07798642036366, 131.07832154763636, 131.07904542254545, 131.07905882763637, 131.07928671418185, 131.07930011927274, 131.07939395490908, 131.0794207650909, 131.07948779054547, 131.0795011956364, 131.0795146007273, 131.079554816, 131.07960843636366, 131.0796486516364, 131.07975589236366, 131.07975589236366, 131.0798363229091, 131.0798363229091, 131.08006420945455, 131.0801312349091, 131.08025188072727, 131.08025188072733, 131.0804127418182, 131.080439552, 131.08043955200003, 131.08049317236365, 131.08066743854545, 131.08072105890912, 131.08081489454548, 131.08093554036367, 131.08096235054546, 131.08096235054546, 131.0810159709091, 131.08106959127275, 131.081176832, 131.08121704727273, 135.1737049134546, 135.1743483578182, 135.1743483578182, 135.17440197818183, 135.17442878836366, 135.17450921890915, 135.17484434618183, 135.1749381818182, 135.1751928785455, 135.1751928785455, 135.17534033454547, 135.17538054981821, 135.17539395490908, 135.17539395490908, 135.1755146007273, 135.17555481600002, 135.1756888669091, 135.17575589236364, 135.17576929745456, 135.17576929745456, 135.17586313309093, 135.17588994327275, 135.1759435636364, 135.17606420945458, 135.17617145018184, 135.17617145018184, 135.17625188072728, 135.17630550109092, 135.176439552, 135.17646636218183, 135.17647976727275, 135.1765467927273, 135.17657360290912, 135.17662722327276, 135.1766942487273, 135.1766942487273, 135.17672105890912, 135.1768417047273, 135.1774449338182, 135.1775789847273, 135.1777264407273, 139.26997301527274, 
139.2702009018182, 139.27032154763637, 139.2704287883637, 139.27044219345458, 139.2706566749091, 139.2707505105455, 139.2707639156364, 139.27085775127273, 139.27095158690912, 139.27100520727276, 139.2710722327273, 139.27115266327277, 139.2712062836364, 139.271259904, 139.27125990400003, 139.27143417018186, 139.2715011956364, 139.27152800581823, 139.2715414109091, 139.27154141090912, 139.27160843636364, 139.2717961076364, 139.2718229178182, 139.271849728, 139.27187653818183, 139.27205080436366, 139.2723859316364, 139.27249317236365, 139.27250657745452, 139.27253338763637, 139.2725601978182, 139.2726942487273, 139.27380687127277, 143.36626792727273, 143.36628133236366, 143.36630814254548, 143.36642878836363, 143.3666566749091, 143.36671029527275, 143.36676391563637, 143.36679072581822, 143.366817536, 143.36688456145455, 143.36712585309093, 143.36715266327275, 143.3671928785455, 143.3672196887273, 143.36725990400004, 143.36732692945455, 143.36744757527276, 143.3675146007273, 143.36754141090913, 143.36755481600002, 143.36775589236365, 143.36786313309094, 143.367997184, 143.36807761454548, 143.36809101963638, 143.36810442472728, 143.3681178298182, 143.36825188072729, 143.3682786909091, 143.36854679272727, 143.36860041309095, 143.3687076538182, 143.36888192000004, 143.3690695912728, 143.3695923898182, 143.369914112, 143.37015540363637, 143.37026264436363, 143.37114738036362, 143.371536128, 147.46111508945455, 147.4617585338182, 147.46195961018185, 147.4620132305455, 147.46232154763638, 147.46317947345457, 147.46344757527277, 147.46352800581818, 147.46356822109092, 147.4636888669091, 147.46374248727273, 147.46390334836363, 147.46402399418184, 147.46403739927274, 147.46457360290913, 147.46477467927275, 147.46481489454547, 147.4648417047273, 147.46506959127274, 147.46510980654546, 147.46536450327275, 147.46549855418186, 151.55801323054547, 151.55820090181817, 151.55821430690912, 151.55850921890908, 151.55887115636364, 151.5588979665455, 151.55920628363637, 151.55931352436366, 151.55938054981823, 151.55940736000005, 151.5596486516364, 151.55970227200004, 151.5598229178182, 151.55984972799996, 151.55993015854548, 151.5599569687273, 151.56005080436367, 151.56015804509093, 151.56018485527275, 151.56047976727274, 151.56051998254551, 151.56053338763638, 151.56061381818188, 151.56065403345457, 151.5606540334546, 151.5607210589091, 151.56088192, 151.56102937600005, 151.56113661672728, 151.56216880872728, 155.65473710545456, 155.65485775127274, 155.65487115636364, 155.65499180218183, 155.65513925818186, 155.65523309381817, 155.65530011927274, 155.6555682210909, 155.65559503127272, 155.6556486516364, 155.65567546181822, 155.65594356363638, 155.65602399418182, 155.65642614690913, 155.6564797672727, 155.6565333876364, 155.6565736029091, 155.65705618618185, 155.65830285963636, 155.65855755636366, 159.75010706618184, 159.75045559854547, 159.7505360290909, 159.75053602909094, 159.75077732072725, 159.7508041309091, 159.75083094109092, 159.75087115636367, 159.7508979665455, 159.7509247767273, 159.75099180218183, 159.75113925818184, 159.75120628363638, 159.75125990400002, 159.75144757527272, 159.75146098036367, 159.7515146007273, 159.751702272, 159.7517692974546, 159.75180951272728, 159.7518095127273, 159.75197037381818, 159.75229209600002, 159.75233231127274, 159.75237252654546, 159.75241274181818, 159.7525601978182, 159.752587008, 159.75379346618186, 159.75423583418186, 163.84610706618184, 163.84650921890912, 163.8478631330909, 163.84802399418186, 163.8482652858182, 163.84854679272732, 163.84858700800004, 
163.8487210589091, 163.8489623505455, 163.84936450327274, 163.84952536436364, 167.94301861236366, 167.94304542254548, 167.94328671418185, 167.94335373963636, 167.9439569687273, 167.94441274181824, 167.9450025658182, 167.94539131345456, 167.94551195927278, 167.94614199854547, 172.0382143069091, 172.03826792727276, 172.03829473745455, 172.0382947374546, 172.03872370036368, 172.03911244800003, 172.03915266327277, 172.03988994327273, 172.04060041309094, 172.04082829963633, 172.04136450327275, 172.0415387694546, 172.04194092218185, 172.04212859345458, 176.1346164596364, 176.13472370036368, 176.13480413090915, 176.13487115636366, 176.1356352465455, 176.13575589236362, 176.13672105890913, 176.13719023709095, 176.13733769309096, 176.13736450327278, 176.13811518836366, 176.1384101003637, 180.2304153832727, 180.2316620567273, 180.23226528581822, 180.2327076538182, 180.23384708654547, 184.32801058909092, 184.32830550109094, 184.32831890618183, 184.32937790836368, 184.32983368145455, 184.33020902400003, 188.4213161658182, 188.4232062836364, 188.4238363229091, 188.42451998254546, 188.4257532509091, 188.42626264436365, 188.4262626443637, 192.51880413090913, 192.52050657745457, 192.52173984581816, 196.61442878836365, 196.61572908218182, 196.61631890618182, 200.71242614690908, 200.71254679272727, 204.80731352436362, 204.80740735999998, 204.80917683200002, 204.8092974778182, 208.90234835781823, 212.99833495272725, 213.00080148945457]\n\na0 = np.array(avgArr0)\na1 = np.array(avgArr1)\na3 = np.array(avgArr3)\na5 = np.array(avgArr5)\na7 = np.array(avgArr7)\na9 = np.array(avgArr9)\n\nsns.set(style=\"white\", palette=\"muted\", color_codes=True)\n\nf, axes = plt.subplots(figsize=(10, 10))\n\naxlabel = 'Среднее время, мс'\n\nsns.distplot(a0, color=\"#4285f4\", ax=axes, axlabel=axlabel, label='0')\nsns.distplot(a1, color=\"#ea4335\", ax=axes, axlabel=axlabel, label='0.1')\nsns.distplot(a3, color=\"#fbbc04\", ax=axes, axlabel=axlabel, label='0.3')\nsns.distplot(a5, color=\"#34a853\", ax=axes, axlabel=axlabel, label='0.5')\nsns.distplot(a7, color=\"#ff6d01\", ax=axes, axlabel=axlabel, label='0.7')\nsns.distplot(a9, color=\"#46bdc6\", ax=axes, axlabel=axlabel, label='0.9')\n\naxes.legend()\n\nplt.setp(axes)\nplt.tight_layout()\nplt.show()" ]
[ [ "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.subplots", "matplotlib.pyplot.setp", "matplotlib.pyplot.show", "numpy.array" ] ]
dliu5812/PDAM
[ "09a5511e6bd10158ea06771c63be3150982b9fe3" ]
[ "maskrcnn_benchmark/modeling/domain_adaption/LabelResizeLayer.py" ]
[ "#\n# from __future__ import absolute_import\n# from __future__ import division\n# from __future__ import print_function\nimport random\nimport torch\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Function\nimport cv2\n\n\nclass ImageLabelResizeLayer(nn.Module):\n \"\"\"\n Resize label to be the same size with the samples\n \"\"\"\n def __init__(self):\n super(ImageLabelResizeLayer, self).__init__()\n\n\n def forward(self,x, need_backprop):\n\n feats = x.detach().cpu().numpy()\n lbs = need_backprop.detach().cpu().numpy()\n gt_blob = np.zeros((lbs.shape[0], feats.shape[2], feats.shape[3], 1), dtype=np.float32)\n for i in range(lbs.shape[0]):\n lb=np.array([lbs[i]])\n lbs_resize = cv2.resize(lb, (feats.shape[3] ,feats.shape[2]), interpolation=cv2.INTER_NEAREST)\n gt_blob[i, 0:lbs_resize.shape[0], 0:lbs_resize.shape[1], 0] = lbs_resize\n\n channel_swap = (0, 3, 1, 2)\n gt_blob = gt_blob.transpose(channel_swap)\n y=Variable(torch.from_numpy(gt_blob)).cuda()\n y=y.squeeze(1).long()\n return y\n\n\nclass InstanceLabelResizeLayer(nn.Module):\n\n\n def __init__(self):\n super(InstanceLabelResizeLayer, self).__init__()\n self.minibatch=256\n\n def forward(self, x,need_backprop):\n feats = x.data.cpu().numpy()\n lbs = need_backprop.data.cpu().numpy()\n\n resized_lbs = np.ones((feats.shape[0], 1), dtype=np.float32)\n for i in range(lbs.shape[0]):\n resized_lbs[i*self.minibatch:(i+1)*self.minibatch] = lbs[i]\n\n y=torch.from_numpy(resized_lbs).cuda()\n\n return y\n\nclass FcLabelResizeLayer(nn.Module):\n\n\n def __init__(self):\n super(FcLabelResizeLayer, self).__init__()\n self.minibatch = 1\n\n def forward(self, x,need_backprop):\n feats = x.data.cpu().numpy()\n lbs = need_backprop.data.cpu().numpy()\n\n resized_lbs = np.ones((feats.shape[0], 1), dtype=np.float32)\n for i in range(lbs.shape[0]):\n resized_lbs[i*self.minibatch:(i+1)*self.minibatch] = lbs[i]\n\n y=torch.from_numpy(resized_lbs).cuda().long()\n\n return y" ]
[ [ "numpy.array", "numpy.ones", "torch.from_numpy", "numpy.zeros" ] ]
Burntt/MastersMachineLearning
[ "9c8896b4dfe46ee02bc5fdbca47acffbeca6828e" ]
[ "day10_Optimization_and_Regularization_in_DL/notmnist.py" ]
[ "import os\nfrom glob import glob\n\nimport numpy as np\nfrom matplotlib.pyplot import imread\nfrom sklearn.model_selection import train_test_split\n\n\ndef load_notmnist(\n path=\"./notMNIST_small\", letters=\"ABCDEFGHIJ\", img_shape=(28, 28), test_size=0.25, one_hot=False\n):\n\n # download data if it's missing. If you have any problems, go to the urls and load it manually.\n if not os.path.exists(path):\n if not os.path.exists(\"./notMNIST_small.tar.gz\"):\n print(\"Downloading data...\")\n assert (\n os.system(\n \"curl http://yaroslavvb.com/upload/notMNIST/notMNIST_small.tar.gz > notMNIST_small.tar.gz\"\n )\n == 0\n )\n print(\"Extracting ...\")\n assert os.system(\"tar -zxvf notMNIST_small.tar.gz > untar_notmnist.log\") == 0\n\n data, labels = [], []\n print(\"Parsing...\")\n for img_path in glob(os.path.join(path, \"*/*\")):\n class_i = img_path.split(os.sep)[-2]\n if class_i not in letters:\n continue\n try:\n data.append(imread(img_path))\n labels.append(\n class_i,\n )\n except BaseException:\n print(\"found broken img: %s [it's ok if <10 images are broken]\" % img_path)\n\n data = np.stack(data)[:, None].astype(\"float32\")\n data = (data - np.mean(data)) / np.std(data)\n\n # convert classes to ints\n letter_to_i = {l: i for i, l in enumerate(letters)}\n labels = np.array(list(map(letter_to_i.get, labels)))\n\n if one_hot:\n labels = (np.arange(np.max(labels) + 1)[None, :] == labels[:, None]).astype(\"float32\")\n\n # split into train/test\n X_train, X_test, y_train, y_test = train_test_split(\n data, labels, test_size=test_size, random_state=42\n )\n\n print(\"Done\")\n return X_train, y_train, X_test, y_test\n" ]
[ [ "matplotlib.pyplot.imread", "numpy.max", "numpy.stack", "numpy.std", "sklearn.model_selection.train_test_split", "numpy.mean" ] ]
pedro-abreu/twostream-attention
[ "60a47c50b8f2427911e5e30fd6c6f933dbf08a4e" ]
[ "code/code_AVA/extra/pose_test.py" ]
[ "import tensorflow as tf\nimport utils\nimport voting\nfrom pose_model import pose_create_model, compile_model\nfrom pose_data import load_split, get_AVA_set\nimport time\nfrom keras import backend as K\nimport numpy as np\nimport pickle\n\n\ndef main():\n\n root_dir = '../../data/AVA/files/'\n\n # Load list of action classes and separate them (from utils_stream)\n classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')\n\n # Parameters for training (batch size 32 is supposed to be the best?)\n # Parameters for training\n params = {'dim': (300, 300), 'batch_size': 32,\n 'n_classes': len(classes['label_id']), 'n_channels': 3,\n 'nb_epochs': 200, 'model': 'alexnet', 'email': True, 'train_chunk_size': 2**12,\n 'validation_chunk_size': 2**12}\n soft_sigmoid = True\n store_predictions = True\n minValLoss = 9999990.0\n split = \"test\"\n\n # Get validation set from directory\n partition = {}\n partition['test'] = get_AVA_set(classes=classes, filename=root_dir + \"AVA_\" + split.title() + \"_Custom_Corrected.csv\", soft_sigmoid=True)\n\n time_str = time.strftime(\"%y%m%d%H%M\", time.localtime())\n result_csv = \"output_test_pose_\" + time_str + \".csv\"\n\n # Load trained model\n pose_weights = \"../models/pose_alexnet_1808310209.hdf5\"\n model = pose_create_model(classes=classes['label_id'], soft_sigmoid=soft_sigmoid, image_shape=params['dim'], model_name=params['model'])\n model = compile_model(model, soft_sigmoid=soft_sigmoid)\n model.load_weights(pose_weights)\n\n print(\"Test set size: \" + str(len(partition['test'])))\n\n # Load chunks\n test_splits = utils.make_chunks(original_list=partition['test'], size=len(partition['test']), chunk_size=2**11)\n\n # Test directories where pre-processed test files are\n pose_dir = \"/media/pedro/actv-ssd/pose_\" + split + \"/\"\n\n test_chunks_count = 0\n\n pose_votes = {}\n obj_votes = {}\n human_votes = {}\n\n for row in partition['test']:\n row = row.split(\"@\")\n i = row[0] + \"@\" + row[1] + \"@\" + str(row[2]) + \"@\" + str(row[3]) + \"@\" + str(row[4]) + \"@\" + str(row[5])\n pose_votes[i] = np.zeros(utils.POSE_CLASSES)\n obj_votes[i] = np.zeros(utils.OBJ_HUMAN_CLASSES)\n human_votes[i] = np.zeros(utils.HUMAN_HUMAN_CLASSES)\n\n test_predictions = []\n with tf.device('/gpu:0'):\n for testIDS in test_splits:\n # TODO Technically it shouldnt return labels here (these are ground truth)\n x_test_pose, y_test_pose, y_test_object, y_test_human = load_split(testIDS, None, params['dim'], params['n_channels'], split, filter_type, soft_sigmoid=True, train=False)\n print(\"Predicting on chunk \" + str(test_chunks_count) + \"/\" + str(len(test_splits)))\n\n predictions = model.predict(x_test_pose, batch_size=params['batch_size'], verbose=1)\n if store_predictions is True:\n # print(predictions[0][0])\n # print(predictions[1][0])\n # print(predictions[2][0])\n\n # tarr = np.hstack((np.vstack(predictions[0]), np.vstack(predictions[1]), np.vstack(predictions[2])))\n test_predictions.append(predictions)\n\n # Convert predictions to readable output and perform majority voting\n voting.pred2classes(testIDS, predictions, pose_votes, obj_votes, human_votes, thresh=0.4)\n test_chunks_count += 1\n\n if store_predictions is True:\n #tp = np.vstack(test_predictions)\n # print(tp.shape)\n with open(\"thresholds/pose/predictions_pose_\" + time_str + \".pickle\", 'wb') as handle:\n pickle.dump(test_predictions, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # When you're done getting all the votes, write output csv\n with open(result_csv, \"a\") as 
output_file:\n for key in pose_votes:\n idx = key.split(\"@\")\n actions = []\n pv = pose_votes[key]\n pose_vote = pv.argmax(axis=0) + 1\n actions.append(pose_vote)\n\n # Get 3 top voted object\n ov = obj_votes[key]\n top_three_obj_votes = ov.argsort()[-3:][::-1] + utils.POSE_CLASSES + 1\n for t in top_three_obj_votes:\n if t != 0: # Often there might only be two top voted or one\n actions.append(t)\n # Get 3 top voted human\n hv = human_votes[key]\n top_three_human_votes = hv.argsort()[-3:][::-1] + utils.POSE_CLASSES + utils.OBJ_HUMAN_CLASSES + 1\n for t in top_three_human_votes:\n if t != 0: # Often there might only be two top voted or one\n actions.append(t)\n\n video_name = idx[0]\n timestamp = idx[1]\n bb_topx = idx[2]\n bb_topy = idx[3]\n bb_botx = idx[4]\n bb_boty = idx[5]\n for a in actions:\n line = video_name + \",\" + timestamp + \",\" + bb_topx + \",\" + bb_topy + \",\" + bb_botx + \",\" + bb_boty + \",\" + str(a)\n output_file.write(\"%s\\n\" % line)\n\n if params['email']:\n utils.sendemail(from_addr='[email protected]',\n to_addr_list=['[email protected]'],\n subject='Finished prediction for ' + filter_type,\n message='Testing pose with following params: ' + str(params),\n login='[email protected]',\n password='1!qwerty')\n\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "tensorflow.device", "numpy.zeros" ] ]
greatwallet/cosypose
[ "e72ce7d521ef61870daef267cbbe65aaebe9d24d" ]
[ "evaluation/meters/detection_meters.py" ]
[ "import numpy as np\r\nfrom sklearn.metrics import average_precision_score\r\nimport xarray as xr\r\nimport torchvision\r\nimport torch\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\nfrom .base import Meter\r\n\r\nfrom .utils import (match_poses, get_top_n_ids,\r\n add_valid_gt, get_candidate_matches, add_inst_num)\r\nfrom cosypose.utils.xarray import xr_merge\r\n\r\n\r\nclass DetectionMeter(Meter):\r\n def __init__(self,\r\n iou_threshold=0.5,\r\n errors_bsz=512,\r\n consider_all_predictions=False,\r\n targets=None,\r\n visib_gt_min=-1,\r\n n_top=-1):\r\n\r\n self.iou_threshold = iou_threshold\r\n self.consider_all_predictions = consider_all_predictions\r\n self.targets = targets\r\n self.visib_gt_min = visib_gt_min\r\n self.errors_bsz = errors_bsz\r\n self.n_top = n_top\r\n self.reset()\r\n\r\n def compute_metrics(self, bbox_pred, bbox_gt):\r\n iou_all = torchvision.ops.box_iou(bbox_pred, bbox_gt)\r\n arange_n = torch.arange(len(bbox_pred))\r\n iou = iou_all[arange_n, arange_n]\r\n return dict(iou=iou)\r\n\r\n def compute_metrics_batch(self, bbox_pred, bbox_gt):\r\n metrics = []\r\n ids = torch.arange(len(bbox_pred))\r\n ds = TensorDataset(bbox_pred, bbox_gt, ids)\r\n dl = DataLoader(ds, batch_size=self.errors_bsz)\r\n\r\n for (bbox_pred_, bbox_gt_, ids_) in dl:\r\n metrics.append(self.compute_metrics(bbox_pred_, bbox_gt_))\r\n\r\n if len(metrics) == 0:\r\n metrics.append(dict(\r\n iou=torch.empty(0, dtype=torch.float),\r\n ))\r\n\r\n metricsd = dict()\r\n for k in metrics[0].keys():\r\n metricsd[k] = torch.cat([metrics_n[k] for metrics_n in metrics], dim=0)\r\n return metricsd\r\n\r\n def add(self, pred_data, gt_data):\r\n group_keys = ['scene_id', 'view_id', 'label']\r\n\r\n pred_data = pred_data.float()\r\n gt_data = gt_data.float()\r\n\r\n # Only keep predictions relevant to gt scene and images.\r\n gt_infos = gt_data.infos.loc[:, ['scene_id', 'view_id']].drop_duplicates().reset_index(drop=True)\r\n targets = self.targets\r\n if targets is not None:\r\n targets = gt_infos.merge(targets)\r\n pred_data.infos['batch_pred_id'] = np.arange(len(pred_data))\r\n keep_ids = gt_infos.merge(pred_data.infos)['batch_pred_id']\r\n pred_data = pred_data[keep_ids]\r\n\r\n # Add inst id to the dataframes\r\n pred_data.infos = add_inst_num(pred_data.infos, key='pred_inst_id', group_keys=group_keys)\r\n gt_data.infos = add_inst_num(gt_data.infos, key='gt_inst_id', group_keys=group_keys)\r\n\r\n # Filter predictions according to BOP evaluation.\r\n if not self.consider_all_predictions:\r\n ids_top_n_pred = get_top_n_ids(pred_data.infos,\r\n group_keys=group_keys, top_key='score',\r\n targets=targets, n_top=self.n_top)\r\n pred_data_filtered = pred_data.clone()[ids_top_n_pred]\r\n else:\r\n pred_data_filtered = pred_data.clone()\r\n\r\n # Compute valid targets according to BOP evaluation.\r\n gt_data.infos = add_valid_gt(gt_data.infos,\r\n group_keys=group_keys,\r\n targets=targets,\r\n visib_gt_min=self.visib_gt_min)\r\n\r\n # Compute tentative candidates\r\n cand_infos = get_candidate_matches(pred_data_filtered.infos, gt_data.infos,\r\n group_keys=group_keys,\r\n only_valids=True)\r\n pred_ids = cand_infos['pred_id'].values.tolist()\r\n gt_ids = cand_infos['gt_id'].values.tolist()\r\n cand_bbox_gt = gt_data.bboxes[gt_ids]\r\n cand_bbox_pred = pred_data_filtered.bboxes[pred_ids]\r\n\r\n # Compute metrics for tentative matches\r\n metrics = self.compute_metrics_batch(cand_bbox_pred, cand_bbox_gt)\r\n\r\n # Matches can only be candidates within thresholds\r\n cand_infos['iou'] = 
metrics['iou'].cpu().numpy()\r\n keep = cand_infos['iou'] >= self.iou_threshold\r\n cand_infos = cand_infos[keep].reset_index(drop=True)\r\n\r\n # Match predictions to ground truth detections\r\n cand_infos['error'] = - cand_infos['iou']\r\n matches = match_poses(cand_infos, group_keys=group_keys)\r\n\r\n # Save all informations in xarray datasets\r\n gt_keys = group_keys + ['gt_inst_id', 'valid'] + (['visib_fract'] if 'visib_fract' in gt_infos else [])\r\n gt = gt_data.infos.loc[:, gt_keys]\r\n preds = pred_data.infos.loc[:, group_keys + ['pred_inst_id', 'score']]\r\n matches = matches.loc[:, group_keys + ['pred_inst_id', 'gt_inst_id', 'cand_id']]\r\n\r\n gt = xr.Dataset(gt).rename({'dim_0': 'gt_id'})\r\n matches = xr.Dataset(matches).rename({'dim_0': 'match_id'})\r\n preds = xr.Dataset(preds).rename({'dim_0': 'pred_id'})\r\n\r\n ious = metrics['iou'].cpu().numpy()[matches['cand_id'].values]\r\n matches['iou'] = 'match_id', ious\r\n matches['iou_valid'] = 'match_id', ious >= self.iou_threshold\r\n\r\n fill_values = {\r\n 'iou': np.nan,\r\n 'iou_valid': False,\r\n 'score': np.nan,\r\n }\r\n matches = xr_merge(matches, preds, on=group_keys + ['pred_inst_id'],\r\n dim1='match_id', dim2='pred_id', fill_value=fill_values)\r\n gt = xr_merge(gt, matches, on=group_keys + ['gt_inst_id'],\r\n dim1='gt_id', dim2='match_id', fill_value=fill_values)\r\n\r\n preds_match_merge = xr_merge(preds, matches, on=group_keys+['pred_inst_id'],\r\n dim1='pred_id', dim2='match_id', fill_value=fill_values)\r\n preds['iou_valid'] = 'pred_id', preds_match_merge['iou_valid']\r\n\r\n self.datas['gt_df'].append(gt)\r\n self.datas['pred_df'].append(preds)\r\n self.datas['matches_df'].append(matches)\r\n\r\n def summary(self):\r\n gt_df = xr.concat(self.datas['gt_df'], dim='gt_id')\r\n matches_df = xr.concat(self.datas['matches_df'], dim='match_id')\r\n pred_df = xr.concat(self.datas['pred_df'], dim='pred_id')\r\n valid_df = gt_df.sel(gt_id=gt_df['valid'])\r\n\r\n # AP/mAP @ IoU < threshold\r\n valid_k = 'iou_valid'\r\n n_gts = dict()\r\n\r\n if self.n_top > 0:\r\n group_keys = ['scene_id', 'view_id', 'label']\r\n subdf = gt_df[[*group_keys, 'valid']].to_dataframe().groupby(group_keys).sum().reset_index()\r\n subdf['gt_count'] = np.minimum(self.n_top, subdf['valid'])\r\n for label, group in subdf.groupby('label'):\r\n n_gts[label] = group['gt_count'].sum()\r\n else:\r\n subdf = gt_df[['label', 'valid']].groupby('label').sum()\r\n for label in subdf['label'].values:\r\n n_gts[label] = subdf.sel(label=label)['valid'].item()\r\n\r\n ap_dfs = dict()\r\n\r\n def compute_ap(label_df, label_n_gt):\r\n label_df = label_df.sort_values('score', ascending=False).reset_index(drop=True)\r\n label_df['n_tp'] = np.cumsum(label_df[valid_k].values.astype(np.float))\r\n label_df['prec'] = label_df['n_tp'] / (np.arange(len(label_df)) + 1)\r\n label_df['recall'] = label_df['n_tp'] / label_n_gt\r\n y_true = label_df[valid_k]\r\n y_score = label_df['score']\r\n ap = average_precision_score(y_true, y_score) * y_true.sum() / label_n_gt\r\n label_df['AP'] = ap\r\n label_df['n_gt'] = label_n_gt\r\n return ap, label_df\r\n\r\n df = pred_df[['label', valid_k, 'score']].to_dataframe().set_index(['label'])\r\n for label, label_n_gt in n_gts.items():\r\n if df.index.contains(label):\r\n label_df = df.loc[[label]]\r\n if label_df[valid_k].sum() > 0:\r\n ap, label_df = compute_ap(label_df, label_n_gt)\r\n ap_dfs[label] = label_df\r\n\r\n if len(ap_dfs) > 0:\r\n mAP = np.mean([np.unique(ap_df['AP']).item() for ap_df in ap_dfs.values()])\r\n AP, 
ap_dfs['all'] = compute_ap(df.reset_index(), sum(list(n_gts.values())))\r\n else:\r\n AP, mAP = 0., 0.\r\n n_gt_valid = int(sum(list(n_gts.values())))\r\n\r\n summary = {\r\n 'n_gt': len(gt_df['gt_id']),\r\n 'n_gt_valid': n_gt_valid,\r\n 'n_pred': len(pred_df['pred_id']),\r\n 'n_matched': len(matches_df['match_id']),\r\n 'matched_gt_ratio': len(matches_df['match_id']) / n_gt_valid,\r\n 'pred_matched_ratio': len(pred_df['pred_id']) / max(len(matches_df['match_id']), 1),\r\n 'iou_valid_recall': valid_df['iou_valid'].sum('gt_id').item() / n_gt_valid,\r\n }\r\n\r\n summary.update({\r\n 'AP': AP,\r\n 'mAP': mAP,\r\n })\r\n\r\n dfs = dict(gt=gt_df, matches=matches_df, preds=pred_df, ap=ap_dfs)\r\n return summary, dfs\r\n" ]
[ [ "torch.empty", "torch.utils.data.DataLoader", "torch.utils.data.TensorDataset", "sklearn.metrics.average_precision_score", "torch.cat", "numpy.unique", "numpy.minimum" ] ]
Chandler/color_science_papers
[ "c43addd63d5dc9bc7ed5f093f60432c929d13b8a" ]
[ "a_simple_algorithm_for_metamer_mismatch_bodies/metamer_mismatch_body.py" ]
[ "import numpy as np\nfrom scipy import optimize\nfrom numpy.testing import assert_array_almost_equal as almost_equal\n\nCOLOR_DIMENSIONS = 3\nLIGHT_DIMENSIONS = 31\n\n# vector representing a light beam with power 1 at every wavelength\nequal_energy_illumination_vector = [1] * LIGHT_DIMENSIONS\n\ndef assert_shape(m, shape):\n if m.shape != shape:\n raise ValueError(\"incorrect shape expected: {} found: {}\".format(m.shape, shape))\n\ndef sample_unit_sphere(npoints):\n \"\"\"\n return `npoints` random points on the unit sphere\n \"\"\"\n vec = np.random.randn(3, npoints)\n vec /= np.linalg.norm(vec, axis=0)\n return vec.T\n\ndef solve_linear_program(\n object_function_coefficents,\n constraint_function=None,\n constraint_function_required_value=None,\n bounds=None):\n \"\"\"\n This method minimizes and maximizes a linear function with respect to\n an equality constraint and lower and upper bounds\n\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html\n Minimize: c^T * x\n Subject to: \n A_ub * x <= b_ub\n A_eq * x == b_eq\n \"\"\"\n xmax = \\\n optimize.linprog(\n c=object_function_coefficents,\n A_eq=constraint_function,\n b_eq=constraint_function_required_value,\n bounds=bounds).x\n xmin = \\\n optimize.linprog(\n c=object_function_coefficents * -1,\n A_eq=constraint_function,\n b_eq=constraint_function_required_value,\n bounds=bounds).x\n\n return (xmin, xmax)\n\ndef compute_metamer_mismatch_body(\n observer_color_signal_Φ,\n observer_response_functions_Φ,\n observer_response_functions_Ψ,\n scene_illumination_Φ=equal_energy_illumination_vector,\n scene_illumination_Ψ=equal_energy_illumination_vector,\n sampling_resolution=100):\n\n assert_shape(observer_color_signal_Φ, (COLOR_DIMENSIONS,))\n assert_shape(observer_response_functions_Φ, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))\n assert_shape(observer_response_functions_Ψ, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))\n assert_shape(scene_illumination_Φ, (LIGHT_DIMENSIONS,))\n assert_shape(scene_illumination_Ψ, (LIGHT_DIMENSIONS,))\n\n color_signal_map_Φ = (observer_response_functions_Φ.T * scene_illumination_Φ).T\n color_signal_map_Ψ = (observer_response_functions_Ψ.T * scene_illumination_Ψ).T\n\n mismatch_body_extrema_points = []\n\n # iterate over a sampling of points on the unit sphere, interpreted as direction vectors\n # pointing in all directions.\n for direction_vector in sample_unit_sphere(sampling_resolution):\n\n # We assume the Euclidan Inner Product for our color vector space. Given that, a vector and its\n # functional (its covector) are identical. (This is why the euclidian dot product is a vector\n # matrix multiplied against itself)\n #\n # This functional can be thought of stacks of parallel\n # planes that are normal to `direction_vector`. Two of these planes will lie tangent to our metamer\n # mismatch body.\n direction_functional = direction_vector\n\n # compose the direction functional R3 -> R, with the color signal map to produce\n # a new funtional R31 -> R. 
\n ΨF = np.dot(color_signal_map_Ψ, direction_functional)\n\n # construct a linear programming problem\n # equation to minimize and maximize: \n # ΨF, a function from R31 -> R\n # ΨF returns the projection of some reflectance function in R31\n # onto the line in R3 represented by `direction_vector`\n #\n # constraints:\n # 1) constrain the solution set of reflectances to `0 > x_i <= 1`, this limits the solution to\n # physically realizable reflectances\n #\n # 2) constrain the solution set to `color_signal_map_Φ(x) = observer_color_signal_Φ`, \n # this limits the solution to metamers of `observer_color_signal_Φ`\n #\n # These are both convex sets. Their intersection is also a convex set, which is the metamer\n # Mismatch Body we are computing.\n #\n min_reflectance, max_reflectance = \\\n solve_linear_program(\n object_function_coefficents=ΨF,\n constraint_function=color_signal_map_Φ.T,\n constraint_function_required_value=observer_color_signal_Φ,\n bounds=(0,1))\n\n # # inline-test: these two reflectences should be metamers of `observer_color_signal_Φ`\n # almost_equal(observer_color_signal_Φ, np.dot(color_signal_map_Φ.T, min_reflectance), decimal=2)\n # almost_equal(observer_color_signal_Φ, np.dot(color_signal_map_Φ.T, max_reflectance), decimal=2)\n \n min_color_signal_Ψ = np.dot(color_signal_map_Ψ.T, min_reflectance) \n max_color_signal_Ψ = np.dot(color_signal_map_Ψ.T, max_reflectance)\n\n mismatch_body_extrema_points.extend([min_color_signal_Ψ, max_color_signal_Ψ])\n\n # scale the resulting body so that the brightest illuminant color response == 1\n scale_factor = np.max(np.dot(observer_response_functions_Ψ.T, scene_illumination_Ψ))\n\n return [p/scale_factor for p in mismatch_body_extrema_points]\n\ndef compute_object_color_solid(\n observer_response_functions,\n scene_illumination=equal_energy_illumination_vector,\n sampling_resolution=100):\n \"\"\"\n The linear programming formulation of the OCS is identical to that of the MMB minus\n the constraints related to the second observer.\n\n An MMB is a product of two observers but the OCS is simply the set of all object colors\n for a single observer.\n\n \"Computing the object colour solid using spherical sampling\"\n https://ueaeprints.uea.ac.uk/62975/\n \"\"\"\n assert_shape(observer_response_functions, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))\n assert_shape(scene_illumination, (LIGHT_DIMENSIONS,))\n\n color_signal_map = (observer_response_functions.T * scene_illumination).T\n\n ocs_extrema_points = []\n\n # iterate over a sampling of points on the unit sphere, interpreted as direction vectors\n # pointing in all directions.\n for direction_vector in sample_unit_sphere(sampling_resolution):\n\n direction_functional = direction_vector\n\n # compose the direction functional R3 -> R, with the color signal map to produce\n # a new funtional R31 -> R. \n ΨF = np.dot(color_signal_map, direction_functional)\n\n min_reflectance, max_reflectance = \\\n solve_linear_program(\n object_function_coefficents=ΨF,\n bounds=(0,1))\n\n min_color_signal = np.dot(color_signal_map.T, min_reflectance) \n max_color_signal = np.dot(color_signal_map.T, max_reflectance)\n\n ocs_extrema_points.extend([min_color_signal, max_color_signal])\n\n # scale the resulting body so that the brightest illuminant color response == 1\n scale_factor = np.max(np.dot(observer_response_functions.T, scene_illumination))\n\n return [p/scale_factor for p in ocs_extrema_points]\n" ]
[ [ "scipy.optimize.linprog", "numpy.linalg.norm", "numpy.random.randn", "numpy.dot" ] ]
schoppmp/iree
[ "d573c3dbb4eef8044764ae6d80ca79e37e8de522" ]
[ "bindings/python/tests/testdata/generate_tflite.py" ]
[ "# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport tensorflow as tf\n\n\nclass Squared(tf.Module):\n\n @tf.function\n def __call__(self, x):\n return tf.square(x)\n\n\nmodel = Squared()\nconcrete_func = model.__call__.get_concrete_function(\n tf.TensorSpec(shape=[4, 3], dtype=tf.float32))\n\nconverter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\ntflite_model = converter.convert()\n\nthis_dir = os.path.dirname(__file__)\nwith open(os.path.join(this_dir, \"tflite_sample.fb\"), \"wb\") as f:\n f.write(tflite_model)\n" ]
[ [ "tensorflow.square", "tensorflow.TensorSpec", "tensorflow.lite.TFLiteConverter.from_concrete_functions" ] ]
JoaoVicente129/GamestonkTerminal
[ "5f744f8ff3f034e436d0629fb530ffd41b86e6d9" ]
[ "gamestonk_terminal/behavioural_analysis/reddit_view.py" ]
[ "import argparse\nimport warnings\nimport pandas as pd\nfrom prawcore.exceptions import ResponseException\nfrom requests import HTTPError\nfrom psaw import PushshiftAPI\nimport praw\nimport finviz\nfrom gamestonk_terminal.helper_funcs import check_positive, parse_known_args_and_warn\nfrom gamestonk_terminal import config_terminal as cfg\nfrom gamestonk_terminal.reddit_helpers import (\n print_and_record_reddit_post,\n find_tickers,\n)\n\n\ndef watchlist(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"watchlist\",\n description=\"\"\"Print other users watchlist. [Source: Reddit]\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=5,\n help=\"limit of posts with watchlists retrieved.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n l_sub_reddits = [\n \"pennystocks\",\n \"RobinHoodPennyStocks\",\n \"Daytrading\",\n \"StockMarket\",\n \"stocks\",\n \"investing\",\n \"wallstreetbets\",\n ]\n\n d_submission = {}\n d_watchlist_tickers = {}\n l_watchlist_links = list()\n l_watchlist_author = list()\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n # dt_last_time_market_close = get_last_time_market_was_open(\n # datetime.now() - timedelta(hours=24)\n # )\n # n_ts_after = int(dt_last_time_market_close.timestamp())\n psaw_api = PushshiftAPI()\n submissions = psaw_api.search_submissions(\n # after=n_ts_after,\n subreddit=l_sub_reddits,\n q=\"WATCHLIST|Watchlist|watchlist\",\n filter=[\"id\"],\n )\n\n n_flair_posts_found = 0\n while True:\n try:\n submission = next(submissions, None)\n\n # Check if search_submissions didn't get anymore posts\n if not submission:\n break\n\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if (\n not submission.removed_by_category\n and submission.selftext\n and submission.link_flair_text not in [\"Yolo\", \"Meme\"]\n and submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Increment count of valid posts found\n n_flair_posts_found += 1\n\n # Check if number of wanted posts found has been reached\n if n_flair_posts_found > ns_parser.n_limit - 1:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. 
check your token.\\n\"\n )\n return\n\n if n_flair_posts_found:\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n s_watchlist_tickers = \"\"\n n_tickers = 0\n for t_ticker in lt_watchlist_sorted:\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n finviz.get_stock(t_ticker[0])\n if int(t_ticker[1]) > 1:\n s_watchlist_tickers += f\"{t_ticker[1]} {t_ticker[0]}, \"\n n_tickers += 1\n except Exception:\n # print(e, \"\\n\")\n pass\n if n_tickers:\n print(\n \"The following stock tickers have been mentioned more than once across the previous watchlists:\"\n )\n print(s_watchlist_tickers[:-2] + \"\\n\")\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n print(\"\")\n\n\ndef popular_tickers(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"popular\",\n description=\"\"\"Print latest popular tickers. [Source: Reddit] \"\"\",\n )\n parser.add_argument(\n \"-n\",\n \"--number\",\n action=\"store\",\n dest=\"n_top\",\n type=check_positive,\n default=10,\n help=\"display top N tickers\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=50,\n help=\"limit of posts retrieved per sub reddit.\",\n )\n parser.add_argument(\n \"-s\",\n \"--sub\",\n action=\"store\",\n dest=\"s_subreddit\",\n type=str,\n help=\"\"\"\n subreddits to look for tickers, e.g. pennystocks,stocks.\n Default: pennystocks, RobinHoodPennyStocks, Daytrading, StockMarket, stocks, investing,\n wallstreetbets\n \"\"\",\n )\n \"\"\"\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=1,\n help=\"look for the tickers from those n past days.\",\n )\n \"\"\"\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # n_ts_after = int(\n # (datetime.today() - timedelta(days=ns_parser.n_days)).timestamp()\n # )\n\n if ns_parser.s_subreddit:\n if \",\" in ns_parser.s_subreddit:\n l_sub_reddits = ns_parser.s_subreddit.split(\",\")\n else:\n l_sub_reddits = [ns_parser.s_subreddit]\n else:\n l_sub_reddits = [\n \"pennystocks\",\n \"RobinHoodPennyStocks\",\n \"Daytrading\",\n \"StockMarket\",\n \"stocks\",\n \"investing\",\n \"wallstreetbets\",\n ]\n\n # d_submission = {}\n d_watchlist_tickers = {}\n # l_watchlist_links = list()\n l_watchlist_author = list()\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n psaw_api = PushshiftAPI()\n\n for s_sub_reddit in l_sub_reddits:\n print(\n f\"Search for latest tickers under {ns_parser.n_limit} '{s_sub_reddit}' posts\"\n )\n submissions = psaw_api.search_submissions(\n # after=int(n_ts_after),\n subreddit=s_sub_reddit,\n limit=ns_parser.n_limit,\n filter=[\"id\"],\n )\n\n n_tickers = 0\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's content\n if (\n not submission.removed_by_category\n and (submission.selftext or submission.title)\n and 
submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n n_tickers += len(l_tickers_found)\n\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. check your token.\\n\"\n )\n return\n\n print(f\" {n_tickers} potential tickers found.\")\n\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n\n if lt_watchlist_sorted:\n n_top_stocks = 0\n # pylint: disable=redefined-outer-name\n popular_tickers = []\n for t_ticker in lt_watchlist_sorted:\n if n_top_stocks > ns_parser.n_top:\n break\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n stock_info = finviz.get_stock(t_ticker[0])\n popular_tickers.append(\n (\n t_ticker[1],\n t_ticker[0],\n stock_info[\"Company\"],\n stock_info[\"Sector\"],\n stock_info[\"Price\"],\n stock_info[\"Change\"],\n stock_info[\"Perf Month\"],\n f\"https://finviz.com/quote.ashx?t={t_ticker[0]}\",\n )\n )\n n_top_stocks += 1\n except HTTPError as e:\n if e.response.status_code != 404:\n print(f\"Unexpected exception from Finviz: {e}\")\n except Exception as e:\n print(e, \"\\n\")\n return\n\n popular_tickers_df = pd.DataFrame(\n popular_tickers,\n columns=[\n \"Mentions\",\n \"Ticker\",\n \"Company\",\n \"Sector\",\n \"Price\",\n \"Change\",\n \"Perf Month\",\n \"URL\",\n ],\n )\n\n print(f\"\\nThe following TOP {ns_parser.n_top} tickers have been mentioned:\")\n\n print(popular_tickers_df, \"\\n\")\n else:\n print(\"No tickers found\")\n\n print(\"\")\n\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. 
check your token.\\n\"\n )\n return\n\n\ndef spac_community(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"spac_c\",\n description=\"\"\"Print other users SPACs announcement under subreddit 'SPACs' [Source: Reddit]\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=10,\n help=\"limit of posts with SPACs retrieved\",\n )\n parser.add_argument(\n \"-p\",\n \"--popular\",\n action=\"store_true\",\n default=False,\n dest=\"b_popular\",\n help=\"popular flag, if true the posts retrieved are based on score rather than time\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n d_submission = {}\n d_watchlist_tickers = {}\n l_watchlist_links = list()\n l_watchlist_author = list()\n\n # psaw_api = PushshiftAPI()\n\n if ns_parser.b_popular:\n submissions = praw_api.subreddit(\"SPACs\").hot(limit=ns_parser.n_limit)\n else:\n submissions = praw_api.subreddit(\"SPACs\").new(limit=ns_parser.n_limit)\n\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if (\n not submission.removed_by_category\n and submission.selftext\n and submission.link_flair_text not in [\"Yolo\", \"Meme\"]\n and submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. 
check your token.\\n\"\n )\n return\n\n if d_watchlist_tickers:\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n s_watchlist_tickers = \"\"\n n_tickers = 0\n for t_ticker in lt_watchlist_sorted:\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n finviz.get_stock(t_ticker[0])\n if int(t_ticker[1]) > 1:\n s_watchlist_tickers += f\"{t_ticker[1]} {t_ticker[0]}, \"\n n_tickers += 1\n except Exception:\n # print(e, \"\\n\")\n pass\n\n if n_tickers:\n print(\n \"The following stock tickers have been mentioned more than once across the previous SPACs:\"\n )\n print(s_watchlist_tickers[:-2])\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n\n\ndef spac(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"spac\",\n description=\"\"\" Show other users SPACs announcement [Reddit] \"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=5,\n help=\"limit of posts with SPACs retrieved.\",\n )\n \"\"\"\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"look for the tickers from those n past days.\",\n )\n \"\"\"\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n d_submission = {}\n d_watchlist_tickers = {}\n l_watchlist_links = list()\n l_watchlist_author = list()\n\n # n_ts_after = int(\n # (datetime.today() - timedelta(days=ns_parser.n_days)).timestamp()\n # )\n l_sub_reddits = [\n \"pennystocks\",\n \"RobinHoodPennyStocks\",\n \"Daytrading\",\n \"StockMarket\",\n \"stocks\",\n \"investing\",\n \"wallstreetbets\",\n ]\n\n warnings.filterwarnings(\"ignore\") # To avoid printing the warning\n psaw_api = PushshiftAPI()\n submissions = psaw_api.search_submissions(\n # after=n_ts_after,\n subreddit=l_sub_reddits,\n q=\"SPAC|Spac|spac|Spacs|spacs\",\n filter=[\"id\"],\n )\n n_flair_posts_found = 0\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if (\n not submission.removed_by_category\n and submission.selftext\n and submission.link_flair_text not in [\"Yolo\", \"Meme\"]\n and submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Increment count of valid posts found\n n_flair_posts_found += 1\n\n # Check if 
number of wanted posts found has been reached\n if n_flair_posts_found > ns_parser.n_limit - 1:\n break\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. check your token.\\n\"\n )\n return\n\n if n_flair_posts_found:\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n s_watchlist_tickers = \"\"\n n_tickers = 0\n for t_ticker in lt_watchlist_sorted:\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n finviz.get_stock(t_ticker[0])\n if int(t_ticker[1]) > 1:\n s_watchlist_tickers += f\"{t_ticker[1]} {t_ticker[0]}, \"\n n_tickers += 1\n except Exception:\n # print(e, \"\\n\")\n pass\n if n_tickers:\n print(\n \"The following stock tickers have been mentioned more than once across the previous SPACs:\"\n )\n print(s_watchlist_tickers[:-2])\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n\n\ndef wsb_community(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"wsb\",\n description=\"\"\"Print what WSB gang are up to in subreddit wallstreetbets. [Source: Reddit]\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=10,\n help=\"limit of posts to print.\",\n )\n parser.add_argument(\n \"-n\",\n \"--new\",\n action=\"store_true\",\n default=False,\n dest=\"b_new\",\n help=\"new flag, if true the posts retrieved are based on being more recent rather than their score.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n d_submission = {}\n l_watchlist_links = list()\n\n # psaw_api = PushshiftAPI()\n\n if ns_parser.b_new:\n submissions = praw_api.subreddit(\"wallstreetbets\").new(\n limit=ns_parser.n_limit\n )\n else:\n submissions = praw_api.subreddit(\"wallstreetbets\").hot(\n limit=ns_parser.n_limit\n )\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if not submission.removed_by_category:\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. check your token.\\n\"\n )\n return\n except Exception as e:\n print(e, \"\\n\")\n" ]
[ [ "pandas.DataFrame" ] ]
wann31828/Tacotron-2
[ "d7161f950cd42c7a7fd36ec2aaac19ba60567876" ]
[ "tacotron/utils/plot.py" ]
[ "import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\n\ndef split_title_line(title_text, max_words=5):\n\t\"\"\"\n\tA function that splits any string based on specific character\n\t(returning it with the string), with maximum number of words on it\n\t\"\"\"\n\tseq = title_text.split()\n\treturn '\\n'.join([' '.join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)])\n\ndef plot_alignment(alignment, path, info=None, split_title=False, max_len=None):\n\tif max_len is not None:\n\t\talignment = alignment[:, :max_len]\n\n\tfig = plt.figure(figsize=(8, 6))\n\tax = fig.add_subplot(111)\n\n\tim = ax.imshow(\n\t\talignment,\n\t\taspect='auto',\n\t\torigin='lower',\n\t\tinterpolation='none')\n\tfig.colorbar(im, ax=ax)\n\txlabel = 'Decoder timestep'\n\tif info is not None:\n\t\tif split_title:\n\t\t\ttitle = split_title_line(info)\n\t\telse:\n\t\t\ttitle = info\n\tplt.xlabel(xlabel)\n\tplt.title(title)\n\tplt.ylabel('Encoder timestep')\n\tplt.tight_layout()\n\tplt.savefig(path, format='png')\n\tplt.close()\n\n\ndef plot_spectrogram(pred_spectrogram, path, info=None, split_title=False, target_spectrogram=None, max_len=None, auto_aspect=False):\n\tif max_len is not None:\n\t\ttarget_spectrogram = target_spectrogram[:max_len]\n\t\tpred_spectrogram = pred_spectrogram[:max_len]\n\n\tif info is not None:\n\t\tif split_title:\n\t\t\ttitle = split_title_line(info)\n\t\telse:\n\t\t\ttitle = info\n\n\tfig = plt.figure(figsize=(10, 8))\n\t# Set common labels\n\tfig.text(0.5, 0.18, title, horizontalalignment='center', fontsize=16)\n\n\t#target spectrogram subplot\n\tif target_spectrogram is not None:\n\t\tax1 = fig.add_subplot(311)\n\t\tax2 = fig.add_subplot(312)\n\n\t\tif auto_aspect:\n\t\t\tim = ax1.imshow(np.rot90(target_spectrogram), aspect='auto', interpolation='none')\n\t\telse:\n\t\t\tim = ax1.imshow(np.rot90(target_spectrogram), interpolation='none')\n\t\tax1.set_title('Target Mel-Spectrogram')\n\t\tfig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)\n\t\tax2.set_title('Predicted Mel-Spectrogram')\n\telse:\n\t\tax2 = fig.add_subplot(211)\n\n\tif auto_aspect:\n\t\tim = ax2.imshow(np.rot90(pred_spectrogram), aspect='auto', interpolation='none')\n\telse:\n\t\tim = ax2.imshow(np.rot90(pred_spectrogram), interpolation='none')\n\tfig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax2)\n\n\tplt.tight_layout()\n\tplt.savefig(path, format='png')\n\tplt.close()\n" ]
[ [ "matplotlib.pyplot.figure", "matplotlib.pyplot.tight_layout", "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "numpy.rot90", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.close", "matplotlib.use", "matplotlib.pyplot.xlabel" ] ]
suryakantpandey/opencv
[ "2a52e44bc605bf73502dee0e0e3954bcda19a246" ]
[ "modules/calib3d/misc/python/test/test_solvepnp.py" ]
[ "#!/usr/bin/env python\n# Python 2/3 compatibility\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2 as cv\n\nfrom tests_common import NewOpenCVTests\n\nclass solvepnp_test(NewOpenCVTests):\n\n def test_regression_16040(self):\n obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n img_points = np.array(\n [[700, 400], [700, 600], [900, 600], [900, 400]], dtype=np.float32\n )\n\n cameraMatrix = np.array(\n [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32\n )\n distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)\n r = np.array([], dtype=np.float32)\n x, r, t, e = cv.solvePnPGeneric(\n obj_points, img_points, cameraMatrix, distCoeffs, reprojectionError=r\n )\n\n def test_regression_16040_2(self):\n obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n img_points = np.array(\n [[[700, 400], [700, 600], [900, 600], [900, 400]]], dtype=np.float32\n )\n\n cameraMatrix = np.array(\n [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32\n )\n distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)\n r = np.array([], dtype=np.float32)\n x, r, t, e = cv.solvePnPGeneric(\n obj_points, img_points, cameraMatrix, distCoeffs, reprojectionError=r\n )\n\n def test_regression_16049(self):\n obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n img_points = np.array(\n [[[700, 400], [700, 600], [900, 600], [900, 400]]], dtype=np.float32\n )\n\n cameraMatrix = np.array(\n [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32\n )\n distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)\n x, r, t, e = cv.solvePnPGeneric(\n obj_points, img_points, cameraMatrix, distCoeffs\n )\n if e is None:\n # noArray() is supported, see https://github.com/opencv/opencv/issues/16049\n pass\n else:\n eDump = cv.utils.dumpInputArray(e)\n self.assertEqual(eDump, \"InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=1 dims(-1)=2 size(-1)=1x1 type(-1)=CV_32FC1\")\n\n\nif __name__ == '__main__':\n NewOpenCVTests.bootstrap()\n" ]
[ [ "numpy.array" ] ]
jrkoenig/folseparators
[ "7b35a1e5fc27709adcc647528bf8820201408966" ]
[ "experiments/make_charts.py" ]
[ "# Copyright 2020 Stanford University\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse, random, json, os\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\nimport matplotlib.pyplot as plt\n\n\nimport seaborn as sns\nfrom collections import Counter\nfrom typing import *\nimport typing\n\ndef int_bins(x: List[int]) -> List[float]:\n l,h = min(x),max(x)\n return [x - 0.5 for x in range(l,h+2)]\ndef intdistplot(x: Any, **kwargs: Any) -> Any:\n return sns.distplot(x, bins = int_bins(x), **kwargs)\n\n\ndef main() -> None:\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"results\", type=argparse.FileType('r'))\n parser.add_argument(\"--description\", type=argparse.FileType('r'), default = \"conjectures/benchmark.json\")\n parser.add_argument(\"--output\", \"-o\", type=str, default = \"out/charts\")\n args = parser.parse_args()\n\n\n try:\n os.makedirs(args.output, exist_ok=True)\n os.chdir(args.output)\n except OSError as e:\n print(e)\n return\n\n sns.set(style=\"white\", palette=\"muted\", color_codes=True)\n font = {'size':16, 'family':'serif', 'serif': ['CMU Serif']}\n plt.rc('font', **font)\n plt.rc('mathtext', fontset='cm')\n plt.rc('axes', labelsize='medium')\n plt.rc('xtick', labelsize='medium')\n plt.rc('ytick', labelsize='medium')\n plt.rc('legend', fontsize='medium')\n\n descs = json.load(args.description)\n desc_by_id = {}\n for d in descs:\n desc_by_id[(d['base'], d['conjecture'])] = d\n def desc_of(r: Dict) -> Dict:\n return desc_by_id[(r['base'], r['conjecture'])]\n results = json.load(args.results)\n results = [r for r in results if r['base'] != 'tlb-safety']\n\n summary_file = open(\"summary.txt\", \"w\")\n def _print(*args: Any) -> None:\n print(*args, file=summary_file)\n\n # print(\"Random Formula:\")\n # sample = random.sample(results, 5)\n # for r in sample:\n # print(desc_by_id[(r['base'], r['conjecture'])]['golden_formula'])\n # print(\"\")\n\n # print(\"There are {} protocols\".format(len(set([r['base'] for r in results]))))\n\n\n\n #intdistplot([d['quantifiers'] for d in descs], axlabel=\"quantifier count\", kde=False).get_figure().savefig(\"quantifier_distribution.png\")\n \n # fig = plt.figure(figsize=(8,6))\n # ax = sns.countplot(data = pd.DataFrame([d['base'].replace(\"_\", \" \").replace(\"-\", \" \") for d in descs]), color=\"c\", y = 0, orient='h')\n # ax.set_ylabel(\"protocol\")\n # ax.set_xlabel(\"number of conjuncts\")\n # plt.subplots_adjust(left=0.5)\n # fig.suptitle(\"Distribution of conjucts over protocols\")\n # plt.savefig(\"conjunct_distribution.png\")\n _print(f\"Total CPU hours {sum(r['stats']['total_time'] for r in results)/3600:.0f}\\n\")\n\n for r in results:\n if r['killed']: continue\n if r['stats']['formula'] == 'false':\n print (r['base'], r['conjecture'])\n\n \n s: typing.Counter[str] = Counter()\n f: typing.Counter[str] = Counter()\n k: typing.Counter[str] = Counter()\n for r in results:\n if r['success']:\n s[r['base']] += 1\n elif r['killed']:\n k[r['base']] += 1\n else:\n 
f[r['base']] += 1\n\n\n    # fig = plt.figure(figsize=(8,6))\n    # plt.subplots_adjust(left=0.5)\n    # ax = plt.axes()\n    # labels = list(sorted(set(s.keys()) | set(f.keys()) | set(k.keys())))\n    # labels.reverse()\n    # plt.barh(range(len(labels)), list(1 for l in labels), color='#319b7c', linewidth=0)\n    # plt.barh(range(len(labels)), list(((k[l]+f[l])/float(s[l]+f[l]+k[l]) for l in labels)), color='#fdce4b', linewidth=0)\n    # plt.barh(range(len(labels)), list((k[l]/float(s[l]+f[l]+k[l]) for l in labels)), color='#e44033', linewidth=0)\n    # plt.yticks(range(len(labels)), labels)\n    # plt.xlim(0,1)\n    # ax.spines['top'].set_visible(False)\n    # ax.spines['right'].set_visible(False)\n    # ax.spines['bottom'].set_visible(False)\n    # ax.spines['left'].set_visible(False)\n    # fig.suptitle(\"Success rate by protocol (normalized)\")\n    # plt.savefig(\"success_by_protocol.png\")\n\n    missing_experiments = set((d['base'], d['conjecture']) for d in descs) - set((d['base'], d['conjecture']) for d in results)\n    if len(missing_experiments) > 0:\n        _print(f\"Missing {len(missing_experiments)} results from benchmark\")\n    _print(\"Results count: \", len(results), \"{}/{}/{} succ/kill/fail\".format(sum(s.values()), sum(k.values()), sum(f.values())))\n    _print(f\"Success rate: {sum(s.values())/len(results)*100.0:0.1f}\")\n\n    # print(\"\\nProb   Succ   Killed  Failed\")\n    # for l in labels:\n    #     print(l, s[l], k[l], f[l])\n    # print(\"\")\n\n    fig = plt.figure(figsize=(6,4))\n\n    s = Counter()\n    f = Counter()\n    k = Counter()\n    for r in results:\n        golden_quant_count = desc_by_id[r['base'], r['conjecture']]['quantifiers']\n        if r['success']:\n            s[golden_quant_count] += 1\n        elif r['killed']:\n            k[golden_quant_count] += 1\n        else:\n            f[golden_quant_count] += 1\n\n    ax = plt.axes()\n    labels = list(sorted(set(s.keys()) | set(k.keys()) | set(f.keys())))\n    plt.bar(range(len(labels)), list(k[l]+f[l]+s[l] for l in labels), edgecolor='0', color='#FFFFFF', linewidth=0.5, clip_on=False)\n    plt.bar(range(len(labels)), list(k[l]+f[l] for l in labels), color='#444444', edgecolor='#444444', linewidth=0.5)\n    #plt.bar(range(len(labels)), list(k[l] for l in labels), color='#e44033', linewidth=0)\n    plt.xticks(range(len(labels)), labels)\n    plt.ylim(0,None)\n\n    plt.xlabel(\"Number of quantifiers in golden formula\")\n    ax.tick_params(axis='y', left=True, width=0.5)\n    ax.spines['top'].set_visible(False)\n    ax.spines['right'].set_visible(False)\n    ax.spines['bottom'].set_visible(False)\n    ax.spines['left'].set_visible(False)\n    ax.legend(['Success', 'Failure'], frameon=False)\n    #fig.suptitle(\"Quantifier conjunct distribution with success rate\")\n    plt.savefig(\"success_by_quantifier_count.eps\", bbox_inches='tight')\n    plt.savefig(\"success_by_quantifier_count.png\", bbox_inches='tight')\n\n    _print(\"\\nQuant. @ success @ killed @ failed\")\n    for l in labels:\n        _print(l, \"@\", s[l], \"@\", k[l], \"@\", f[l])\n    _print(\"\\n\")\n\n    fig = plt.figure(figsize=(6,4))\n    times = []\n    for r in results:\n        if r['success']:\n            times.append(r['stats']['total_time'])\n        else:\n            times.append(float('Inf'))\n    times.sort()\n    ax = plt.axes()\n    plt.plot([x+0.5 for x in range(len(times))], times, color='black')\n    plt.yscale(\"log\")\n    plt.xlim(0,len(times))\n    plt.ylim(1,3600)\n    plt.ylabel(\"Time to learn (sec)\")\n    plt.xlabel(\"Conjecture (ordinal)\")\n    #fig.suptitle(\"Ordinal chart of time to learn conjuncts\")\n    plt.savefig(\"ordinal_learning_times.eps\", bbox_inches='tight')\n    plt.savefig(\"ordinal_learning_times.png\", bbox_inches='tight')\n\n    # fig = plt.figure(figsize=(6,4))\n    # xs = []\n    # ys = []\n    # for r in results:\n    #     if 'stats' in r:\n    #         xs.append(max(0.001, r['stats']['counterexample_time']/60.0))\n    #         ys.append(max(0.001, r['stats']['separation_time']/60.0))\n    # times.sort()\n    # ax = plt.axes()\n    # #ax.set_aspect('equal', 'datalim')\n    # plt.scatter(xs, ys, color='black')\n    # plt.yscale(\"log\")\n    # plt.ylim(0.001,10)\n    # plt.xscale(\"log\")\n    # plt.xlim(0.001,10)\n    # plt.ylabel(\"Separation\")\n    # plt.xlabel(\"Counterexample\")\n    # plt.savefig(\"time_scatter.eps\", bbox_inches='tight')\n    # plt.savefig(\"time_scatter.png\", bbox_inches='tight')\n\n    c_to = 0\n    s_to = 0\n    for r in results:\n        if 'stats' in r:\n            if r['stats']['counterexample_time'] > r['timeout'] - 5:\n                c_to += 1\n            if r['stats']['separation_time'] > r['timeout'] - 5:\n                s_to += 1\n    _print(f\"counterexample timeout: {c_to}, separation timeout: {s_to}\")\n\n    # fig = plt.figure(figsize=(6,4))\n    # xs = []\n    # ys = []\n    # for r in results:\n    #     if 'stats' in r:\n    #         xs.append(r['stats']['separation_time'])\n    #         ys.append(r['stats']['matrix_time']/max(0.0001,r['stats']['separation_time']))\n    # times.sort()\n    # ax = plt.axes()\n    # #ax.set_aspect('equal', 'datalim')\n    # plt.scatter(xs, ys, color='black')\n    # plt.ylim(0,1)\n    # #plt.xscale(\"log\")\n    # plt.ylabel(\"Matrix Fraction\")\n    # plt.xlabel(\"Separation Time\")\n    # plt.savefig(\"matrix_percentage.eps\", bbox_inches='tight')\n    # plt.savefig(\"matrix_percentage.png\", bbox_inches='tight')\n\n    m_heavy = 0\n    m_light = 0\n    lower_limit = 200\n    _print(\"\\nFormula with hard to infer matrices:\")\n    for r in results:\n        if 'stats' in r:\n            if r['stats']['separation_time'] > lower_limit:\n                if r['stats']['matrix_time'] > r['stats']['separation_time']*0.5:\n                    m_heavy += 1\n                    print(r['success'], \"\\t\", desc_of(r)['golden_formula'])\n                else:\n                    m_light += 1\n    _print(f\"For examples taking > {lower_limit} sec\")\n    _print(f\"matrix >50%: {m_heavy}, matrix <=50%: {m_light}\")\n\n    errors = []\n    for r in results:\n        if r['killed'] and desc_of(r)['max_term_depth'] <= 1:\n            qc = desc_of(r)['quantifiers']\n            gold = desc_of(r)['golden_formula']\n            errors.append((qc, gold, r['base'] + \"-\" + r['conjecture']))\n    errors.sort()\n    _print(\"\\nKilled Conjuncts:\")\n    for (q, gold, name) in errors:\n        _print(name, q, gold)\n\n    errors2 = []\n    for r in results:\n        if not r['killed'] and not r['success']:\n            qc = desc_of(r)['quantifiers']\n            gold = desc_of(r)['golden_formula']\n            if 'stats' in r:\n                x = min(1, r['stats']['counterexample_time']/float(r['timeout']))\n            else:\n                x = 0.0\n            errors2.append((qc, gold, r['base'] + \"-\" + r['conjecture'], x, r))\n    errors2.sort()\n    _print(\"\\nFailed Conjuncts (counter frac, matrix frac, quants, name, error):\")\n    # for (q, gold, name, x, r) in errors:\n    #     print(f\"{x:0.2f} {name}\", \"@\", q, \"@\", gold)\n    for (q, gold, name, x, r) in errors2:\n        if 'stats' in r:\n            c_frac = min(1, r['stats']['counterexample_time']/float(r['timeout']))\n            m_frac = min(1, r['stats']['matrix_time']/max(0.001, r['stats']['separation_time']))\n            error = r['stats']['error']\n            fq = r['stats']['formula_quantifiers']\n        else:\n            c_frac = 0.0\n            m_frac = 0.0\n            error = \"?\"\n            fq = \"?\"\n        _print(f\"{c_frac:0.2f}\\t{m_frac:0.2f}\\t{q}\\t{name}\\t{error}\\t{fq}\")\n\n    _print(\"\\nFailed Conjuncts (counter frac, matrix frac, quants, name, error):\")\n    # for (q, gold, name, x, r) in errors:\n    #     print(f\"{x:0.2f} {name}\", \"@\", q, \"@\", gold)\n    cc: typing.Counter[Tuple[bool, bool]] = Counter()\n    for (q, gold, name, x, r) in errors2:\n        if 'stats' in r:\n            c_frac = min(1, r['stats']['counterexample_time']/float(r['timeout']))\n            m_frac = min(1, r['stats']['matrix_time']/max(0.001, r['stats']['separation_time']))\n            error = r['stats']['error']\n        else:\n            c_frac = 0.0\n            m_frac = 0.0\n            error = \"?\"\n        reason_c = c_frac >= 0.99 or error.startswith(\"Z3\")\n        reason_m = not reason_c and m_frac >= 0.95\n        cc[(not reason_c, not reason_m)] += 1\n        if not reason_c and not reason_m:\n            _print(f\"{c_frac:0.2f}\\t{m_frac:0.2f}\\t{q}\\t{name}\\t{error}\")\n    for kk, v in cc.items():\n        (counter, matrix) = kk\n        _print(f\"{('c < 0.99' if counter else 'c >= 0.99')}, {('m < 0.95' if matrix else 'm >= 0.95')}: {v}\")\n\n\nif __name__ == \"__main__\":\n    main()\n" ]
[ [ "matplotlib.pyplot.rc", "matplotlib.pyplot.figure", "matplotlib.pyplot.savefig", "matplotlib.pyplot.yscale", "matplotlib.pyplot.axes", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.ylim", "matplotlib.use", "matplotlib.pyplot.xlabel" ] ]
thinhnd2104/dask
[ "995c9dcd47ec040564a66d399fffea18e3dac597" ]
[ "dask/dataframe/shuffle.py" ]
[ "import contextlib\nimport logging\nimport math\nimport shutil\nimport tempfile\nimport uuid\n\nimport numpy as np\nimport pandas as pd\nimport tlz as toolz\n\nfrom .. import base, config\nfrom ..base import compute, compute_as_if_collection, is_dask_collection, tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..layers import ShuffleLayer, SimpleShuffleLayer\nfrom ..sizeof import sizeof\nfrom ..utils import M, digit\nfrom . import methods\nfrom .core import DataFrame, Series, _Frame, map_partitions, new_dd_object\nfrom .dispatch import group_split_dispatch, hash_object_dispatch\n\nlogger = logging.getLogger(__name__)\n\n\ndef _calculate_divisions(\n df,\n partition_col,\n repartition,\n npartitions,\n upsample=1.0,\n partition_size=128e6,\n):\n \"\"\"\n Utility function to calculate divisions for calls to `map_partitions`\n \"\"\"\n sizes = df.map_partitions(sizeof) if repartition else []\n divisions = partition_col._repartition_quantiles(npartitions, upsample=upsample)\n mins = partition_col.map_partitions(M.min)\n maxes = partition_col.map_partitions(M.max)\n divisions, sizes, mins, maxes = base.compute(divisions, sizes, mins, maxes)\n divisions = methods.tolist(divisions)\n if type(sizes) is not list:\n sizes = methods.tolist(sizes)\n mins = methods.tolist(mins)\n maxes = methods.tolist(maxes)\n\n empty_dataframe_detected = pd.isnull(divisions).all()\n if repartition or empty_dataframe_detected:\n total = sum(sizes)\n npartitions = max(math.ceil(total / partition_size), 1)\n npartitions = min(npartitions, df.npartitions)\n n = len(divisions)\n try:\n divisions = np.interp(\n x=np.linspace(0, n - 1, npartitions + 1),\n xp=np.linspace(0, n - 1, n),\n fp=divisions,\n ).tolist()\n except (TypeError, ValueError): # str type\n indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)\n divisions = [divisions[i] for i in indexes]\n\n mins = remove_nans(mins)\n maxes = remove_nans(maxes)\n if pd.api.types.is_categorical_dtype(partition_col.dtype):\n dtype = partition_col.dtype\n mins = pd.Categorical(mins, dtype=dtype).codes.tolist()\n maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()\n\n return divisions, mins, maxes\n\n\ndef sort_values(\n df,\n by,\n npartitions=None,\n ascending=True,\n upsample=1.0,\n partition_size=128e6,\n **kwargs,\n):\n \"\"\"See DataFrame.sort_values for docstring\"\"\"\n if not isinstance(by, str):\n # support [\"a\"] as input\n if isinstance(by, list) and len(by) == 1 and isinstance(by[0], str):\n by = by[0]\n else:\n raise NotImplementedError(\n \"Dataframe only supports sorting by a single column which must \"\n \"be passed as a string or a list of a single string.\\n\"\n \"You passed %s\" % str(by)\n )\n if npartitions == \"auto\":\n repartition = True\n npartitions = max(100, df.npartitions)\n else:\n if npartitions is None:\n npartitions = df.npartitions\n repartition = False\n\n sort_by_col = df[by]\n\n divisions, mins, maxes = _calculate_divisions(\n df, sort_by_col, repartition, npartitions, upsample, partition_size\n )\n\n if (\n mins == sorted(mins, reverse=not ascending)\n and maxes == sorted(maxes, reverse=not ascending)\n and all(\n mx < mn\n for mx, mn in zip(\n maxes[:-1] if ascending else maxes[1:],\n mins[1:] if ascending else mins[:-1],\n )\n )\n and npartitions == df.npartitions\n ):\n # divisions are in the right place\n return df.map_partitions(M.sort_values, by, ascending=ascending)\n\n df = rearrange_by_divisions(df, by, divisions, ascending=ascending)\n df = df.map_partitions(M.sort_values, by, ascending=ascending)\n 
return df\n\n\ndef set_index(\n df,\n index,\n npartitions=None,\n shuffle=None,\n compute=False,\n drop=True,\n upsample=1.0,\n divisions=None,\n partition_size=128e6,\n **kwargs,\n):\n \"\"\"See _Frame.set_index for docstring\"\"\"\n if isinstance(index, Series) and index._name == df.index._name:\n return df\n if isinstance(index, (DataFrame, tuple, list)):\n # Accept [\"a\"], but not [[\"a\"]]\n if (\n isinstance(index, list)\n and len(index) == 1\n and not isinstance(index[0], list) # if index = [[\"a\"]], leave it that way\n ):\n index = index[0]\n else:\n raise NotImplementedError(\n \"Dask dataframe does not yet support multi-indexes.\\n\"\n \"You tried to index with this index: %s\\n\"\n \"Indexes must be single columns only.\" % str(index)\n )\n\n if npartitions == \"auto\":\n repartition = True\n npartitions = max(100, df.npartitions)\n else:\n if npartitions is None:\n npartitions = df.npartitions\n repartition = False\n\n if not isinstance(index, Series):\n index2 = df[index]\n else:\n index2 = index\n\n if divisions is None:\n divisions, mins, maxes = _calculate_divisions(\n df, index2, repartition, npartitions, upsample, partition_size\n )\n\n if (\n mins == sorted(mins)\n and maxes == sorted(maxes)\n and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))\n and npartitions == df.npartitions\n ):\n divisions = mins + [maxes[-1]]\n result = set_sorted_index(df, index, drop=drop, divisions=divisions)\n return result.map_partitions(M.sort_index)\n\n return set_partition(\n df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs\n )\n\n\ndef remove_nans(divisions):\n \"\"\"Remove nans from divisions\n\n These sometime pop up when we call min/max on an empty partition\n\n Examples\n --------\n >>> remove_nans((np.nan, 1, 2))\n [1, 1, 2]\n >>> remove_nans((1, np.nan, 2))\n [1, 2, 2]\n >>> remove_nans((1, 2, np.nan))\n [1, 2, 2]\n \"\"\"\n divisions = list(divisions)\n\n for i in range(len(divisions) - 2, -1, -1):\n if pd.isnull(divisions[i]):\n divisions[i] = divisions[i + 1]\n\n for i in range(len(divisions) - 1, -1, -1):\n if not pd.isnull(divisions[i]):\n for j in range(i + 1, len(divisions)):\n divisions[j] = divisions[i]\n break\n\n return divisions\n\n\ndef set_partition(\n df, index, divisions, max_branch=32, drop=True, shuffle=None, compute=None\n):\n \"\"\"Group DataFrame by index\n\n Sets a new index and partitions data along that index according to\n divisions. Divisions are often found by computing approximate quantiles.\n The function ``set_index`` will do both of these steps.\n\n Parameters\n ----------\n df: DataFrame/Series\n Data that we want to re-partition\n index: string or Series\n Column to become the new index\n divisions: list\n Values to form new divisions between partitions\n drop: bool, default True\n Whether to delete columns to be used as the new index\n shuffle: str (optional)\n Either 'disk' for an on-disk shuffle or 'tasks' to use the task\n scheduling framework. Use 'disk' if you are on a single machine\n and 'tasks' if you are on a distributed cluster.\n max_branch: int (optional)\n If using the task-based shuffle, the amount of splitting each\n partition undergoes. Increase this for fewer copies but more\n scheduler overhead.\n\n See Also\n --------\n set_index\n shuffle\n partd\n \"\"\"\n meta = df._meta._constructor_sliced([0])\n if isinstance(divisions, tuple):\n # pd.isna considers tuples to be scalars. 
Convert to a list.\n divisions = list(divisions)\n\n if np.isscalar(index):\n dtype = df[index].dtype\n else:\n dtype = index.dtype\n\n if pd.isna(divisions).any() and pd.api.types.is_integer_dtype(dtype):\n # Can't construct a Series[int64] when any / all of the divisions are NaN.\n divisions = df._meta._constructor_sliced(divisions)\n else:\n divisions = df._meta._constructor_sliced(divisions, dtype=dtype)\n\n if np.isscalar(index):\n partitions = df[index].map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions)\n else:\n partitions = index.map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions, _index=index)\n\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n compute=compute,\n ignore_index=True,\n )\n\n if np.isscalar(index):\n df4 = df3.map_partitions(\n set_index_post_scalar,\n index_name=index,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n else:\n df4 = df3.map_partitions(\n set_index_post_series,\n index_name=index.name,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n\n df4.divisions = methods.tolist(divisions)\n\n return df4.map_partitions(M.sort_index)\n\n\ndef shuffle(\n df,\n index,\n shuffle=None,\n npartitions=None,\n max_branch=32,\n ignore_index=False,\n compute=None,\n):\n \"\"\"Group DataFrame by index\n\n Hash grouping of elements. After this operation all elements that have\n the same index will be in the same partition. Note that this requires\n full dataset read, serialization and shuffle. This is expensive. If\n possible you should avoid shuffles.\n\n This does not preserve a meaningful index/partitioning scheme. This is not\n deterministic if done in parallel.\n\n See Also\n --------\n set_index\n set_partition\n shuffle_disk\n \"\"\"\n list_like = pd.api.types.is_list_like(index) and not is_dask_collection(index)\n if shuffle == \"tasks\" and (isinstance(index, str) or list_like):\n # Avoid creating the \"_partitions\" column if possible.\n # We currently do this if the user is passing in\n # specific column names (and shuffle == \"tasks\").\n if isinstance(index, str):\n index = [index]\n else:\n index = list(index)\n nset = set(index)\n if nset & set(df.columns) == nset:\n return rearrange_by_column(\n df,\n index,\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n ignore_index=ignore_index,\n compute=compute,\n )\n\n if not isinstance(index, _Frame):\n index = df._select_columns_or_index(index)\n elif hasattr(index, \"to_frame\"):\n # If this is an index, we should still convert to a\n # DataFrame. 
Otherwise, the hashed values of a column\n # selection will not match (important when merging).\n index = index.to_frame()\n\n partitions = index.map_partitions(\n partitioning_index,\n npartitions=npartitions or df.npartitions,\n meta=df._meta._constructor_sliced([0]),\n transform_divisions=False,\n )\n df2 = df.assign(_partitions=partitions)\n df2._meta.index.name = df._meta.index.name\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n compute=compute,\n ignore_index=ignore_index,\n )\n del df3[\"_partitions\"]\n return df3\n\n\ndef rearrange_by_divisions(\n df,\n column,\n divisions,\n max_branch=None,\n shuffle=None,\n ascending=True,\n):\n \"\"\"Shuffle dataframe so that column separates along divisions\"\"\"\n divisions = df._meta._constructor_sliced(divisions)\n meta = df._meta._constructor_sliced([0])\n # Assign target output partitions to every row\n partitions = df[column].map_partitions(\n set_partitions_pre, divisions=divisions, ascending=ascending, meta=meta\n )\n df2 = df.assign(_partitions=partitions)\n\n # Perform shuffle\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n )\n del df3[\"_partitions\"]\n return df3\n\n\ndef rearrange_by_column(\n df,\n col,\n npartitions=None,\n max_branch=None,\n shuffle=None,\n compute=None,\n ignore_index=False,\n):\n shuffle = shuffle or config.get(\"shuffle\", None) or \"disk\"\n\n # if the requested output partitions < input partitions\n # we repartition first as shuffling overhead is\n # proportionate to the number of input partitions\n\n if npartitions is not None and npartitions < df.npartitions:\n df = df.repartition(npartitions=npartitions)\n\n if shuffle == \"disk\":\n return rearrange_by_column_disk(df, col, npartitions, compute=compute)\n elif shuffle == \"tasks\":\n df2 = rearrange_by_column_tasks(\n df, col, max_branch, npartitions, ignore_index=ignore_index\n )\n if ignore_index:\n df2._meta = df2._meta.reset_index(drop=True)\n return df2\n else:\n raise NotImplementedError(\"Unknown shuffle method %s\" % shuffle)\n\n\nclass maybe_buffered_partd:\n \"\"\"\n If serialized, will return non-buffered partd. 
Otherwise returns a buffered partd\n \"\"\"\n\n def __init__(self, buffer=True, tempdir=None):\n self.tempdir = tempdir or config.get(\"temporary_directory\", None)\n self.buffer = buffer\n self.compression = config.get(\"dataframe.shuffle-compression\", None)\n\n def __reduce__(self):\n if self.tempdir:\n return (maybe_buffered_partd, (False, self.tempdir))\n else:\n return (maybe_buffered_partd, (False,))\n\n def __call__(self, *args, **kwargs):\n import partd\n\n path = tempfile.mkdtemp(suffix=\".partd\", dir=self.tempdir)\n\n try:\n partd_compression = (\n getattr(partd.compressed, self.compression)\n if self.compression\n else None\n )\n except AttributeError as e:\n raise ImportError(\n \"Not able to import and load {0} as compression algorithm.\"\n \"Please check if the library is installed and supported by Partd.\".format(\n self.compression\n )\n ) from e\n file = partd.File(path)\n partd.file.cleanup_files.append(path)\n # Envelope partd file with compression, if set and available\n if partd_compression:\n file = partd_compression(file)\n if self.buffer:\n return partd.PandasBlocks(partd.Buffer(partd.Dict(), file))\n else:\n return partd.PandasBlocks(file)\n\n\ndef rearrange_by_column_disk(df, column, npartitions=None, compute=False):\n \"\"\"Shuffle using local disk\n\n See Also\n --------\n rearrange_by_column_tasks:\n Same function, but using tasks rather than partd\n Has a more informative docstring\n \"\"\"\n if npartitions is None:\n npartitions = df.npartitions\n\n token = tokenize(df, column, npartitions)\n always_new_token = uuid.uuid1().hex\n\n p = (\"zpartd-\" + always_new_token,)\n dsk1 = {p: (maybe_buffered_partd(),)}\n\n # Partition data on disk\n name = \"shuffle-partition-\" + always_new_token\n dsk2 = {\n (name, i): (shuffle_group_3, key, column, npartitions, p)\n for i, key in enumerate(df.__dask_keys__())\n }\n\n dependencies = []\n if compute:\n graph = HighLevelGraph.merge(df.dask, dsk1, dsk2)\n graph = HighLevelGraph.from_collections(name, graph, dependencies=[df])\n keys = [p, sorted(dsk2)]\n pp, values = compute_as_if_collection(DataFrame, graph, keys)\n dsk1 = {p: pp}\n dsk2 = dict(zip(sorted(dsk2), values))\n else:\n dependencies.append(df)\n\n # Barrier\n barrier_token = \"barrier-\" + always_new_token\n dsk3 = {barrier_token: (barrier, list(dsk2))}\n\n # Collect groups\n name = \"shuffle-collect-\" + token\n dsk4 = {\n (name, i): (collect, p, i, df._meta, barrier_token) for i in range(npartitions)\n }\n\n divisions = (None,) * (npartitions + 1)\n\n layer = toolz.merge(dsk1, dsk2, dsk3, dsk4)\n graph = HighLevelGraph.from_collections(name, layer, dependencies=dependencies)\n return new_dd_object(graph, name, df._meta, divisions)\n\n\ndef _noop(x, cleanup_token):\n \"\"\"\n A task that does nothing.\n \"\"\"\n return x\n\n\ndef rearrange_by_column_tasks(\n df, column, max_branch=32, npartitions=None, ignore_index=False\n):\n \"\"\"Order divisions of DataFrame so that all values within column(s) align\n\n This enacts a task-based shuffle. It contains most of the tricky logic\n around the complex network of tasks. Typically before this function is\n called a new column, ``\"_partitions\"`` has been added to the dataframe,\n containing the output partition number of every row. This function\n produces a new dataframe where every row is in the proper partition. It\n accomplishes this by splitting each input partition into several pieces,\n and then concatenating pieces from different input partitions into output\n partitions. 
If there are enough partitions then it does this work in\n stages to avoid scheduling overhead.\n\n Lets explain the motivation for this further. Imagine that we have 1000\n input partitions and 1000 output partitions. In theory we could split each\n input into 1000 pieces, and then move the 1 000 000 resulting pieces\n around, and then concatenate them all into 1000 output groups. This would\n be fine, but the central scheduling overhead of 1 000 000 tasks would\n become a bottleneck. Instead we do this in stages so that we split each of\n the 1000 inputs into 30 pieces (we now have 30 000 pieces) move those\n around, concatenate back down to 1000, and then do the same process again.\n This has the same result as the full transfer, but now we've moved data\n twice (expensive) but done so with only 60 000 tasks (cheap).\n\n Note that the `column` input may correspond to a list of columns (rather\n than just a single column name). In this case, the `shuffle_group` and\n `shuffle_group_2` functions will use hashing to map each row to an output\n partition. This approach may require the same rows to be hased multiple\n times, but avoids the need to assign a new \"_partitions\" column.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n column: str or list\n A column name on which we want to split, commonly ``\"_partitions\"``\n which is assigned by functions upstream. This could also be a list of\n columns (in which case shuffle_group will create a hash array/column).\n max_branch: int\n The maximum number of splits per input partition. Defaults to 32.\n If there are more partitions than this then the shuffling will occur in\n stages in order to avoid creating npartitions**2 tasks\n Increasing this number increases scheduling overhead but decreases the\n number of full-dataset transfers that we have to make.\n npartitions: Optional[int]\n The desired number of output partitions\n\n Returns\n -------\n df3: dask.dataframe.DataFrame\n\n See also\n --------\n rearrange_by_column_disk: same operation, but uses partd\n rearrange_by_column: parent function that calls this or rearrange_by_column_disk\n shuffle_group: does the actual splitting per-partition\n \"\"\"\n\n max_branch = max_branch or 32\n\n if (npartitions or df.npartitions) <= max_branch:\n # We are creating a small number of output partitions.\n # No need for staged shuffling. 
Staged shuffling will\n # sometimes require extra work/communication in this case.\n token = tokenize(df, column, npartitions)\n shuffle_name = f\"simple-shuffle-{token}\"\n npartitions = npartitions or df.npartitions\n shuffle_layer = SimpleShuffleLayer(\n shuffle_name,\n column,\n npartitions,\n df.npartitions,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n shuffle_name, shuffle_layer, dependencies=[df]\n )\n return new_dd_object(graph, shuffle_name, df._meta, [None] * (npartitions + 1))\n\n n = df.npartitions\n stages = int(math.ceil(math.log(n) / math.log(max_branch)))\n if stages > 1:\n k = int(math.ceil(n ** (1 / stages)))\n else:\n k = n\n\n inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k ** stages)]\n\n npartitions_orig = df.npartitions\n token = tokenize(df, stages, column, n, k)\n for stage in range(stages):\n stage_name = f\"shuffle-{stage}-{token}\"\n stage_layer = ShuffleLayer(\n stage_name,\n column,\n inputs,\n stage,\n npartitions,\n n,\n k,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n stage_name, stage_layer, dependencies=[df]\n )\n df = new_dd_object(graph, stage_name, df._meta, df.divisions)\n\n if npartitions is not None and npartitions != npartitions_orig:\n token = tokenize(df, npartitions)\n repartition_group_token = \"repartition-group-\" + token\n\n dsk = {\n (repartition_group_token, i): (\n shuffle_group_2,\n k,\n column,\n ignore_index,\n npartitions,\n )\n for i, k in enumerate(df.__dask_keys__())\n }\n\n repartition_get_name = \"repartition-get-\" + token\n\n for p in range(npartitions):\n dsk[(repartition_get_name, p)] = (\n shuffle_group_get,\n (repartition_group_token, p % npartitions_orig),\n p,\n )\n\n graph2 = HighLevelGraph.from_collections(\n repartition_get_name, dsk, dependencies=[df]\n )\n df2 = new_dd_object(\n graph2, repartition_get_name, df._meta, [None] * (npartitions + 1)\n )\n else:\n df2 = df\n df2.divisions = (None,) * (npartitions_orig + 1)\n\n return df2\n\n\n########################################################\n# Various convenience functions to be run by the above #\n########################################################\n\n\ndef partitioning_index(df, npartitions):\n \"\"\"\n Computes a deterministic index mapping each record to a partition.\n\n Identical rows are mapped to the same partition.\n\n Parameters\n ----------\n df : DataFrame/Series/Index\n npartitions : int\n The number of partitions to group into.\n\n Returns\n -------\n partitions : ndarray\n An array of int64 values mapping each record to a partition.\n \"\"\"\n return hash_object_dispatch(df, index=False) % int(npartitions)\n\n\ndef barrier(args):\n list(args)\n return 0\n\n\ndef cleanup_partd_files(p, keys):\n \"\"\"\n Cleanup the files in a partd.File dataset.\n\n Parameters\n ----------\n p : partd.Interface\n File or Encode wrapping a file should be OK.\n keys: List\n Just for scheduling purposes, not actually used.\n \"\"\"\n import partd\n\n if isinstance(p, partd.Encode):\n maybe_file = p.partd\n else:\n maybe_file\n\n if isinstance(maybe_file, partd.File):\n path = maybe_file.path\n else:\n path = None\n\n if path:\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef collect(p, part, meta, barrier_token):\n \"\"\"Collect partitions from partd, yield dataframes\"\"\"\n with ensure_cleanup_on_exception(p):\n res = p.get(part)\n return res if len(res) > 0 else meta\n\n\ndef set_partitions_pre(s, divisions, ascending=True):\n try:\n if ascending:\n 
partitions = divisions.searchsorted(s, side=\"right\") - 1\n else:\n partitions = len(divisions) - divisions.searchsorted(s, side=\"right\") - 1\n except TypeError:\n # When `searchsorted` fails with `TypeError`, it may be\n # caused by nulls in `s`. Try again with the null-values\n # explicitly mapped to the first partition.\n partitions = np.empty(len(s), dtype=\"int32\")\n partitions[s.isna()] = 0\n not_null = s.notna()\n if ascending:\n partitions[not_null] = divisions.searchsorted(s[not_null], side=\"right\") - 1\n else:\n partitions[not_null] = (\n len(divisions) - divisions.searchsorted(s[not_null], side=\"right\") - 1\n )\n partitions[(s >= divisions.iloc[-1]).values] = (\n len(divisions) - 2 if ascending else 0\n )\n return partitions\n\n\ndef shuffle_group_2(df, cols, ignore_index, nparts):\n if not len(df):\n return {}, df\n\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]].astype(np.int32)\n else:\n ind = (\n hash_object_dispatch(df[cols] if cols else df, index=False) % int(nparts)\n ).astype(np.int32)\n\n n = ind.max() + 1\n result2 = group_split_dispatch(df, ind.values.view(), n, ignore_index=ignore_index)\n return result2, df.iloc[:0]\n\n\ndef shuffle_group_get(g_head, i):\n g, head = g_head\n if i in g:\n return g[i]\n else:\n return head\n\n\ndef shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):\n \"\"\"Splits dataframe into groups\n\n The group is determined by their final partition, and which stage we are in\n in the shuffle\n\n Parameters\n ----------\n df: DataFrame\n cols: str or list\n Column name(s) on which to split the dataframe. If ``cols`` is not\n \"_partitions\", hashing will be used to determine target partition\n stage: int\n We shuffle dataframes with many partitions we in a few stages to avoid\n a quadratic number of tasks. This number corresponds to which stage\n we're in, starting from zero up to some small integer\n k: int\n Desired number of splits from this dataframe\n npartition: int\n Total number of output partitions for the full dataframe\n nfinal: int\n Total number of output partitions after repartitioning\n\n Returns\n -------\n out: Dict[int, DataFrame]\n A dictionary mapping integers in {0..k} to dataframes such that the\n hash values of ``df[col]`` are well partitioned.\n \"\"\"\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]]\n else:\n ind = hash_object_dispatch(df[cols] if cols else df, index=False)\n if nfinal and nfinal != npartitions:\n ind = ind % int(nfinal)\n\n c = ind.values\n typ = np.min_scalar_type(npartitions * 2)\n\n c = np.mod(c, npartitions).astype(typ, copy=False)\n np.floor_divide(c, k ** stage, out=c)\n np.mod(c, k, out=c)\n\n return group_split_dispatch(df, c, k, ignore_index=ignore_index)\n\n\[email protected]\ndef ensure_cleanup_on_exception(p):\n \"\"\"Ensure a partd.File is cleaned up.\n\n We have several tasks referring to a `partd.File` instance. We want to\n ensure that the file is cleaned up if and only if there's an exception\n in the tasks using the `partd.File`.\n \"\"\"\n try:\n yield\n except Exception:\n # the function (e.g. 
shuffle_group_3) had an internal exception.\n # We'll cleanup our temporary files and re-raise.\n try:\n p.drop()\n except Exception:\n logger.exception(\"ignoring exception in ensure_cleanup_on_exception\")\n raise\n\n\ndef shuffle_group_3(df, col, npartitions, p):\n with ensure_cleanup_on_exception(p):\n g = df.groupby(col)\n d = {i: g.get_group(i) for i in g.groups}\n p.append(d, fsync=True)\n\n\ndef set_index_post_scalar(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(index_name, drop=drop)\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef set_index_post_series(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(\"_index\", drop=True)\n df2.index.name = index_name\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef drop_overlap(df, index):\n return df.drop(index) if index in df.index else df\n\n\ndef get_overlap(df, index):\n return df.loc[[index]] if index in df.index else df._constructor()\n\n\ndef fix_overlap(ddf, overlap):\n \"\"\"Ensures that the upper bound on each partition of ddf (except the last) is exclusive\"\"\"\n name = \"fix-overlap-\" + tokenize(ddf, overlap)\n n = len(ddf.divisions) - 1\n dsk = {(name, i): (ddf._name, i) for i in range(n)}\n\n frames = []\n for i in overlap:\n\n # `frames` is a list of data from previous partitions that we may want to\n # move to partition i. Here, we add \"overlap\" from the previous partition\n # (i-1) to this list.\n frames.append((get_overlap, (ddf._name, i - 1), ddf.divisions[i]))\n\n # Make sure that any data added from partition i-1 to `frames` is removed\n # from partition i-1.\n dsk[(name, i - 1)] = (drop_overlap, dsk[(name, i - 1)], ddf.divisions[i])\n\n # We do not want to move \"overlap\" from the previous partition (i-1) into\n # this partition (i) if the data from this partition will need to be moved\n # to the next partition (i+1) anyway. 
If we concatenate data too early,\n # we may lose rows (https://github.com/dask/dask/issues/6972).\n if i == ddf.npartitions - 2 or ddf.divisions[i] != ddf.divisions[i + 1]:\n frames.append((ddf._name, i))\n dsk[(name, i)] = (methods.concat, frames)\n frames = []\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, ddf._meta, ddf.divisions)\n\n\ndef compute_and_set_divisions(df, **kwargs):\n mins = df.index.map_partitions(M.min, meta=df.index)\n maxes = df.index.map_partitions(M.max, meta=df.index)\n mins, maxes = compute(mins, maxes, **kwargs)\n mins = remove_nans(mins)\n maxes = remove_nans(maxes)\n\n if (\n sorted(mins) != list(mins)\n or sorted(maxes) != list(maxes)\n or any(a > b for a, b in zip(mins, maxes))\n ):\n raise ValueError(\n \"Partitions must be sorted ascending with the index\", mins, maxes\n )\n\n df.divisions = tuple(mins) + (list(maxes)[-1],)\n\n overlap = [i for i in range(1, len(mins)) if mins[i] >= maxes[i - 1]]\n return fix_overlap(df, overlap) if overlap else df\n\n\ndef set_sorted_index(df, index, drop=True, divisions=None, **kwargs):\n if not isinstance(index, Series):\n meta = df._meta.set_index(index, drop=drop)\n else:\n meta = df._meta.set_index(index._meta, drop=drop)\n\n result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)\n\n if not divisions:\n return compute_and_set_divisions(result, **kwargs)\n elif len(divisions) != len(df.divisions):\n msg = (\n \"When doing `df.set_index(col, sorted=True, divisions=...)`, \"\n \"divisions indicates known splits in the index column. In this \"\n \"case divisions must be the same length as the existing \"\n \"divisions in `df`\\n\\n\"\n \"If the intent is to repartition into new divisions after \"\n \"setting the index, you probably want:\\n\\n\"\n \"`df.set_index(col, sorted=True).repartition(divisions=divisions)`\"\n )\n raise ValueError(msg)\n\n result.divisions = tuple(divisions)\n return result\n" ]
[ [ "pandas.api.types.is_categorical_dtype", "numpy.min_scalar_type", "pandas.api.types.is_list_like", "pandas.api.types.is_integer_dtype", "numpy.floor_divide", "pandas.Categorical", "numpy.mod", "pandas.isnull", "numpy.isscalar", "numpy.linspace", "pandas.isna" ] ]
XidaoW/iFac
[ "43c3103051dfffefe8e0eb09fc7fd53b14754519" ]
[ "src/src/engine/myutil/sampler.py" ]
[ "#!/usr/bin/python\n# -*- coding:utf-8 -*-\n'''\nCreated on 2015/04/13\n\n@author: drumichiro\n'''\nimport numpy as np\nimport matplotlib.mlab as mlab\n\n\ndef generateSample(baseLength, mu, sigma, distFunc):\n x = np.empty([])\n np.random.seed(0)\n for i1 in range(len(mu)):\n data = distFunc(mu[i1], sigma[i1], baseLength)\n x = data if x.shape == () else np.append(x, data, axis=0)\n return x\n\n\ndef generateScalarSample(baseLength, mu, sigma):\n return generateSample(baseLength, mu, sigma, np.random.normal)\n\n\ndef generateVectorSample(baseLength, mu, sigma):\n return generateSample(baseLength, mu, sigma,\n np.random.multivariate_normal)\n\n\ndef gaussian1d(x, mu, sigma):\n return mlab.normpdf(x, mu, sigma)\n\n\ndef gaussian2d(x, mu, sigma):\n return mlab.bivariate_normal(x[..., 0], x[..., 1],\n np.sqrt(sigma[0, 0]), np.sqrt(sigma[1, 1]),\n mu[0], mu[1], sigma[0, 1])\n" ]
[ [ "matplotlib.mlab.normpdf", "numpy.append", "numpy.empty", "numpy.random.seed", "numpy.sqrt" ] ]
bainro/garage
[ "c5afbb19524792d9bbad9b9741f45e1d48ddca3d" ]
[ "src/garage/tf/policies/categorical_cnn_policy.py" ]
[ "\"\"\"CategoricalCNNPolicy with model.\"\"\"\nimport akro\nimport tensorflow as tf\n\nfrom garage.tf.distributions import Categorical\nfrom garage.tf.models import CNNModel\nfrom garage.tf.models import MLPModel\nfrom garage.tf.models import Sequential\nfrom garage.tf.policies import StochasticPolicy\n\n\nclass CategoricalCNNPolicy(StochasticPolicy):\n \"\"\"CategoricalCNNPolicy.\n\n A policy that contains a CNN and a MLP to make prediction based on\n a categorical distribution.\n\n It only works with akro.Discrete action space.\n\n Args:\n env_spec (garage.envs.env_spec.EnvSpec): Environment specification.\n conv_filter_sizes(tuple[int]): Dimension of the filters. For example,\n (3, 5) means there are two convolutional layers. The filter for\n first layer is of dimension (3 x 3) and the second one is of\n dimension (5 x 5).\n conv_filters(tuple[int]): Number of filters. For example, (3, 32) means\n there are two convolutional layers. The filter for the first layer\n has 3 channels and the second one with 32 channels.\n conv_strides(tuple[int]): The stride of the sliding window. For\n example, (1, 2) means there are two convolutional layers. The\n stride of the filter for first layer is 1 and that of the second\n layer is 2.\n conv_pad (str): The type of padding algorithm to use,\n either 'SAME' or 'VALID'.\n name (str): Policy name, also the variable scope of the policy.\n hidden_sizes (list[int]): Output dimension of dense layer(s).\n For example, (32, 32) means the MLP of this policy consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a tf.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a tf.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n tf.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). 
The function should return a\n tf.Tensor.\n layer_normalization (bool): Bool for using layer normalization or not.\n \"\"\"\n\n def __init__(self,\n env_spec,\n conv_filters,\n conv_filter_sizes,\n conv_strides,\n conv_pad,\n name='CategoricalCNNPolicy',\n hidden_sizes=(32, 32),\n hidden_nonlinearity=tf.nn.relu,\n hidden_w_init=tf.initializers.glorot_uniform(),\n hidden_b_init=tf.zeros_initializer(),\n output_nonlinearity=tf.nn.softmax,\n output_w_init=tf.initializers.glorot_uniform(),\n output_b_init=tf.zeros_initializer(),\n layer_normalization=False):\n if not isinstance(env_spec.action_space, akro.Discrete):\n raise ValueError(\n 'CategoricalCNNPolicy only works with akro.Discrete action '\n 'space.')\n\n if not isinstance(env_spec.observation_space, akro.Box) or \\\n not len(env_spec.observation_space.shape) in (2, 3):\n raise ValueError(\n '{} can only process 2D, 3D akro.Image or'\n ' akro.Box observations, but received an env_spec with '\n 'observation_space of type {} and shape {}'.format(\n type(self).__name__,\n type(env_spec.observation_space).__name__,\n env_spec.observation_space.shape))\n\n super().__init__(name, env_spec)\n self.obs_dim = env_spec.observation_space.shape\n self.action_dim = env_spec.action_space.n\n\n self.model = Sequential(\n CNNModel(filter_dims=conv_filter_sizes,\n num_filters=conv_filters,\n strides=conv_strides,\n padding=conv_pad,\n hidden_nonlinearity=hidden_nonlinearity,\n name='CNNModel'),\n MLPModel(output_dim=self.action_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n layer_normalization=layer_normalization,\n name='MLPModel'))\n\n self._initialize()\n\n def _initialize(self):\n if isinstance(self.env_spec.observation_space, akro.Image):\n state_input = tf.compat.v1.placeholder(tf.uint8,\n shape=(None, ) +\n self.obs_dim)\n state_input = tf.cast(state_input, tf.float32)\n state_input /= 255.0\n else:\n state_input = tf.compat.v1.placeholder(tf.float32,\n shape=(None, ) +\n self.obs_dim)\n\n with tf.compat.v1.variable_scope(self.name) as vs:\n self._variable_scope = vs\n self.model.build(state_input)\n\n self._f_prob = tf.compat.v1.get_default_session().make_callable(\n self.model.outputs, feed_list=[self.model.input])\n\n @property\n def vectorized(self):\n \"\"\"Vectorized or not.\n\n Returns:\n bool: True if primitive supports vectorized operations.\n \"\"\"\n return True\n\n def dist_info_sym(self, obs_var, state_info_vars=None, name=None):\n \"\"\"Build a symbolic graph of the distribution parameters.\n\n Args:\n obs_var (tf.Tensor): Tensor input for symbolic graph.\n state_info_vars (dict[np.ndarray]): Extra state information, e.g.\n previous action.\n name (str): Name for symbolic graph.\n\n Returns:\n dict[tf.Tensor]: Outputs of the symbolic graph of distribution\n parameters.\n\n \"\"\"\n with tf.compat.v1.variable_scope(self._variable_scope):\n if isinstance(self.env_spec.observation_space, akro.Image):\n obs_var = tf.cast(obs_var, tf.float32) / 255.0\n\n prob = self.model.build(obs_var, name=name)\n return dict(prob=prob)\n\n def dist_info(self, obs, state_infos=None):\n \"\"\"Get distribution parameters.\n\n Args:\n obs (np.ndarray): Observation input.\n state_infos (dict[np.ndarray]): Extra state information, e.g.\n previous action.\n\n Returns:\n dict[np.ndarray]: Distribution parameters.\n\n \"\"\"\n prob = self._f_prob(obs)\n return 
dict(prob=prob)\n\n def get_action(self, observation):\n \"\"\"Get single action from this policy for the input observation.\n\n Args:\n observation (numpy.ndarray): Observation from environment.\n\n Returns:\n numpy.ndarray: Predicted action.\n dict[str: np.ndarray]: Action distribution.\n\n \"\"\"\n if len(observation.shape) < len(self.obs_dim):\n observation = self.env_spec.observation_space.unflatten(\n observation)\n prob = self._f_prob([observation])[0]\n action = self.action_space.weighted_sample(prob)\n return action, dict(prob=prob)\n\n def get_actions(self, observations):\n \"\"\"Get multiple actions from this policy for the input observations.\n\n Args:\n observations (numpy.ndarray): Observations from environment.\n\n Returns:\n numpy.ndarray: Predicted actions.\n dict[str: np.ndarray]: Action distributions.\n\n \"\"\"\n if len(observations[0].shape) < len(self.obs_dim):\n observations = self.env_spec.observation_space.unflatten_n(\n observations)\n probs = self._f_prob(observations)\n actions = list(map(self.action_space.weighted_sample, probs))\n return actions, dict(prob=probs)\n\n @property\n def distribution(self):\n \"\"\"Policy distribution.\n\n Returns:\n garage.tf.distributions.Categorical: Policy distribution.\n\n \"\"\"\n return Categorical(self.action_dim)\n\n def __getstate__(self):\n \"\"\"Object.__getstate__.\n\n Returns:\n dict: The state to be pickled for the instance.\n\n \"\"\"\n new_dict = super().__getstate__()\n del new_dict['_f_prob']\n return new_dict\n\n def __setstate__(self, state):\n \"\"\"Object.__setstate__.\n\n Args:\n state (dict): Unpickled state.\n\n \"\"\"\n super().__setstate__(state)\n self._initialize()\n" ]
[ [ "tensorflow.compat.v1.placeholder", "tensorflow.zeros_initializer", "tensorflow.compat.v1.get_default_session", "tensorflow.cast", "tensorflow.compat.v1.variable_scope", "tensorflow.initializers.glorot_uniform" ] ]
choishingwan/PRScsx
[ "f7941665c2d86754dbc94a25f2d6d19aea23be32" ]
[ "gigrnd.py" ]
[ "#!/usr/bin/env python\n\n\"\"\"\nRandom variate generator for the generalized inverse Gaussian distribution.\nReference: L Devroye. Random variate generation for the generalized inverse Gaussian distribution.\n Statistics and Computing, 24(2):239-246, 2014.\n\n\"\"\"\n\n\nimport math\nfrom scipy import random\n\n\ndef psi(x, alpha, lam):\n f = -alpha*(math.cosh(x)-1)-lam*(math.exp(x)-x-1)\n return f\n\n\ndef dpsi(x, alpha, lam):\n f = -alpha*math.sinh(x)-lam*(math.exp(x)-1)\n return f\n\n\ndef g(x, sd, td, f1, f2):\n if (x >= -sd) and (x <= td):\n f = 1\n elif x > td:\n f = f1\n elif x < -sd:\n f = f2\n\n return f\n\n\ndef gigrnd(p, a, b):\n # setup -- sample from the two-parameter version gig(lam,omega)\n p = float(p); a = float(a); b = float(b)\n lam = p\n omega = math.sqrt(a*b)\n\n if lam < 0:\n lam = -lam\n swap = True\n else:\n swap = False\n\n alpha = math.sqrt(math.pow(omega,2)+math.pow(lam,2))-lam\n\n # find t\n x = -psi(1, alpha, lam)\n if (x >= 1/2) and (x <= 2):\n t = 1\n elif x > 2:\n t = math.sqrt(2/(alpha+lam))\n elif x < 1/2:\n t = math.log(4/(alpha+2*lam))\n\n # find s\n x = -psi(-1, alpha, lam)\n if (x >= 1/2) and (x <= 2):\n s = 1\n elif x > 2:\n s = math.sqrt(4/(alpha*math.cosh(1)+lam))\n elif x < 1/2:\n if alpha == 0:\n s = 1/lam\n else:\n if lam==0:\n s = math.log(1+1/alpha+math.sqrt(1/math.pow(alpha,2)+2/alpha))\n else:\n s = min(1/lam, math.log(1+1/alpha+math.sqrt(1/math.pow(alpha,2)+2/alpha)))\n\n # find auxiliary parameters\n eta = -psi(t, alpha, lam)\n zeta = -dpsi(t, alpha, lam)\n theta = -psi(-s, alpha, lam)\n xi = dpsi(-s, alpha, lam)\n\n p = 1/xi\n r = 1/zeta\n\n td = t-r*eta\n sd = s-p*theta\n q = td+sd\n\n # random variate generation\n while True:\n U = random.random()\n V = random.random()\n W = random.random()\n if U < q/(p+q+r):\n rnd = -sd+q*V\n elif U < (q+r)/(p+q+r):\n rnd = td-r*math.log(V)\n else:\n rnd = -sd+p*math.log(V)\n\n f1 = math.exp(-eta-zeta*(rnd-t))\n f2 = math.exp(-theta+xi*(rnd+s))\n if W*g(rnd, sd, td, f1, f2) <= math.exp(psi(rnd, alpha, lam)):\n break\n\n # transform back to the three-parameter version gig(p,a,b)\n rnd = math.exp(rnd)*(lam/omega+math.sqrt(1+math.pow(lam,2)/math.pow(omega,2)))\n if swap:\n rnd = 1/rnd\n\n rnd = rnd/math.sqrt(a/b)\n return rnd\n\n\n" ]
[ [ "scipy.random.random" ] ]
landreman/pyQSC
[ "75f34d62c24eb94f481632ee0e1bf260d7581f2a" ]
[ "qsc/qsc.py" ]
[ "\"\"\"\nThis module contains the top-level routines for the quasisymmetric\nstellarator construction.\n\"\"\"\n\nimport logging\nimport numpy as np\nfrom scipy.io import netcdf\n#from numba import jit\n\n#logging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nclass Qsc():\n \"\"\"\n This is the main class for representing the quasisymmetric\n stellarator construction.\n \"\"\"\n \n # Import methods that are defined in separate files:\n from .init_axis import init_axis, convert_to_spline\n from .calculate_r1 import _residual, _jacobian, solve_sigma_equation, \\\n _determine_helicity, r1_diagnostics\n from .grad_B_tensor import calculate_grad_B_tensor, calculate_grad_grad_B_tensor, \\\n Bfield_cylindrical, Bfield_cartesian, grad_B_tensor_cartesian, \\\n grad_grad_B_tensor_cylindrical, grad_grad_B_tensor_cartesian\n from .calculate_r2 import calculate_r2\n from .calculate_r3 import calculate_r3\n from .mercier import mercier\n from .r_singularity import calculate_r_singularity\n from .plot import plot, plot_boundary, get_boundary, B_fieldline, B_contour, plot_axis\n from .Frenet_to_cylindrical import Frenet_to_cylindrical\n from .to_vmec import to_vmec\n from .util import B_mag\n \n def __init__(self, rc, zs, rs=[], zc=[], nfp=1, etabar=1., sigma0=0., B0=1.,\n I2=0., sG=1, spsi=1, nphi=61, B2s=0., B2c=0., p2=0., order=\"r1\"):\n \"\"\"\n Create a quasisymmetric stellarator.\n \"\"\"\n # First, force {rc, zs, rs, zc} to have the same length, for\n # simplicity.\n nfourier = np.max([len(rc), len(zs), len(rs), len(zc)])\n self.nfourier = nfourier\n self.rc = np.zeros(nfourier)\n self.zs = np.zeros(nfourier)\n self.rs = np.zeros(nfourier)\n self.zc = np.zeros(nfourier)\n self.rc[:len(rc)] = rc\n self.zs[:len(zs)] = zs\n self.rs[:len(rs)] = rs\n self.zc[:len(zc)] = zc\n\n # Force nphi to be odd:\n if np.mod(nphi, 2) == 0:\n nphi += 1\n\n if sG != 1 and sG != -1:\n raise ValueError('sG must be +1 or -1')\n \n if spsi != 1 and spsi != -1:\n raise ValueError('spsi must be +1 or -1')\n\n self.nfp = nfp\n self.etabar = etabar\n self.sigma0 = sigma0\n self.B0 = B0\n self.I2 = I2\n self.sG = sG\n self.spsi = spsi\n self.nphi = nphi\n self.B2s = B2s\n self.B2c = B2c\n self.p2 = p2\n self.order = order\n self.min_R0_threshold = 0.3\n self._set_names()\n\n self.calculate()\n\n def change_nfourier(self, nfourier_new):\n \"\"\"\n Resize the arrays of Fourier amplitudes. 
You can either increase\n or decrease nfourier.\n \"\"\"\n rc_old = self.rc\n rs_old = self.rs\n zc_old = self.zc\n zs_old = self.zs\n index = np.min((self.nfourier, nfourier_new))\n self.rc = np.zeros(nfourier_new)\n self.rs = np.zeros(nfourier_new)\n self.zc = np.zeros(nfourier_new)\n self.zs = np.zeros(nfourier_new)\n self.rc[:index] = rc_old[:index]\n self.rs[:index] = rs_old[:index]\n self.zc[:index] = zc_old[:index]\n self.zs[:index] = zs_old[:index]\n nfourier_old = self.nfourier\n self.nfourier = nfourier_new\n self._set_names()\n # No need to recalculate if we increased the Fourier\n # resolution, only if we decreased it.\n if nfourier_new < nfourier_old:\n self.calculate()\n\n def calculate(self):\n \"\"\"\n Driver for the main calculations.\n \"\"\"\n self.init_axis()\n self.solve_sigma_equation()\n self.r1_diagnostics()\n if self.order != 'r1':\n self.calculate_r2()\n if self.order == 'r3':\n self.calculate_r3()\n \n def get_dofs(self):\n \"\"\"\n Return a 1D numpy vector of all possible optimizable\n degrees-of-freedom, for simsopt.\n \"\"\"\n return np.concatenate((self.rc, self.zs, self.rs, self.zc,\n np.array([self.etabar, self.sigma0, self.B2s, self.B2c, self.p2, self.I2, self.B0])))\n\n def set_dofs(self, x):\n \"\"\"\n For interaction with simsopt, set the optimizable degrees of\n freedom from a 1D numpy vector.\n \"\"\"\n assert len(x) == self.nfourier * 4 + 7\n self.rc = x[self.nfourier * 0 : self.nfourier * 1]\n self.zs = x[self.nfourier * 1 : self.nfourier * 2]\n self.rs = x[self.nfourier * 2 : self.nfourier * 3]\n self.zc = x[self.nfourier * 3 : self.nfourier * 4]\n self.etabar = x[self.nfourier * 4 + 0]\n self.sigma0 = x[self.nfourier * 4 + 1]\n self.B2s = x[self.nfourier * 4 + 2]\n self.B2c = x[self.nfourier * 4 + 3]\n self.p2 = x[self.nfourier * 4 + 4]\n self.I2 = x[self.nfourier * 4 + 5]\n self.B0 = x[self.nfourier * 4 + 6]\n self.calculate()\n logger.info('set_dofs called with x={}. Now iota={}, elongation={}'.format(x, self.iota, self.max_elongation))\n \n def _set_names(self):\n \"\"\"\n For simsopt, sets the list of names for each degree of freedom.\n \"\"\"\n names = []\n names += ['rc({})'.format(j) for j in range(self.nfourier)]\n names += ['zs({})'.format(j) for j in range(self.nfourier)]\n names += ['rs({})'.format(j) for j in range(self.nfourier)]\n names += ['zc({})'.format(j) for j in range(self.nfourier)]\n names += ['etabar', 'sigma0', 'B2s', 'B2c', 'p2', 'I2', 'B0']\n self.names = names\n\n @classmethod\n def from_paper(cls, name, **kwargs):\n \"\"\"\n Get one of the configurations that has been used in our papers.\n Available values for ``name`` are\n ``\"r1 section 5.1\"``,\n ``\"r1 section 5.2\"``,\n ``\"r1 section 5.3\"``,\n ``\"r2 section 5.1\"``,\n ``\"r2 section 5.2\"``,\n ``\"r2 section 5.3\"``,\n ``\"r2 section 5.4\"``, and\n ``\"r2 section 5.5\"``.\n These last 5 configurations can also be obtained by specifying an integer 1-5 for ``name``.\n The configurations that begin with ``\"r1\"`` refer to sections in \n Landreman, Sengupta, and Plunk, Journal of Plasma Physics 85, 905850103 (2019).\n The configurations that begin with ``\"r2\"`` refer to sections in \n Landreman and Sengupta, Journal of Plasma Physics 85, 815850601 (2019).\n\n You can specify any other arguments of the ``Qsc`` constructor\n in ``kwargs``. You can also use ``kwargs`` to override any of\n the properties of the configurations from the papers. For\n instance, you can modify the value of ``etabar`` in the first\n example using\n\n .. 
code-block::\n\n q = qsc.Qsc.from_paper('r1 section 5.1', etabar=1.1)\n \"\"\"\n\n def add_default_args(kwargs_old, **kwargs_new):\n \"\"\"\n Take any key-value arguments in ``kwargs_new`` and treat them as\n defaults, adding them to the dict ``kwargs_old`` only if\n they are not specified there.\n \"\"\"\n for key in kwargs_new:\n if key not in kwargs_old:\n kwargs_old[key] = kwargs_new[key]\n\n \n if name == \"r1 section 5.1\":\n \"\"\" The configuration from Landreman, Sengupta, Plunk (2019), section 5.1 \"\"\"\n add_default_args(kwargs, rc=[1, 0.045], zs=[0, -0.045], nfp=3, etabar=-0.9)\n \n elif name == \"r1 section 5.2\":\n \"\"\" The configuration from Landreman, Sengupta, Plunk (2019), section 5.2 \"\"\"\n add_default_args(kwargs, rc=[1, 0.265], zs=[0, -0.21], nfp=4, etabar=-2.25)\n \n elif name == \"r1 section 5.3\":\n \"\"\" The configuration from Landreman, Sengupta, Plunk (2019), section 5.3 \"\"\"\n add_default_args(kwargs, rc=[1, 0.042], zs=[0, -0.042], zc=[0, -0.025], nfp=3, etabar=-1.1, sigma0=-0.6)\n \n elif name == \"r2 section 5.1\" or name == '5.1' or name == 1:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.1 \"\"\"\n add_default_args(kwargs, rc=[1, 0.155, 0.0102], zs=[0, 0.154, 0.0111], nfp=2, etabar=0.64, order='r3', B2c=-0.00322)\n \n elif name == \"r2 section 5.2\" or name == '5.2' or name == 2:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.2 \"\"\"\n add_default_args(kwargs, rc=[1, 0.173, 0.0168, 0.00101], zs=[0, 0.159, 0.0165, 0.000985], nfp=2, etabar=0.632, order='r3', B2c=-0.158)\n \n elif name == \"r2 section 5.3\" or name == '5.3' or name == 3:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.3 \"\"\"\n add_default_args(kwargs, rc=[1, 0.09], zs=[0, -0.09], nfp=2, etabar=0.95, I2=0.9, order='r3', B2c=-0.7, p2=-600000.)\n \n elif name == \"r2 section 5.4\" or name == '5.4' or name == 4:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.4 \"\"\"\n add_default_args(kwargs, rc=[1, 0.17, 0.01804, 0.001409, 5.877e-05],\n zs=[0, 0.1581, 0.01820, 0.001548, 7.772e-05], nfp=4, etabar=1.569, order='r3', B2c=0.1348)\n \n elif name == \"r2 section 5.5\" or name == '5.5' or name == 5:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.5 \"\"\"\n add_default_args(kwargs, rc=[1, 0.3], zs=[0, 0.3], nfp=5, etabar=2.5, sigma0=0.3, I2=1.6, order='r3', B2c=1., B2s=3., p2=-0.5e7)\n\n elif name == \"LandremanPaul2021QA\" or name == \"precise QA\":\n \"\"\"\n A fit of the near-axis model to the quasi-axisymmetric\n configuration in Landreman & Paul, arXiv:2108.03711 (2021).\n\n The fit was performed to the boozmn data using the script\n 20200621-01-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=2,\n rc=[1.0038581971135636, 0.18400998741139907, 0.021723381370503204, 0.0025968236014410812, 0.00030601568477064874, 3.5540509760304384e-05, 4.102693907398271e-06, 5.154300428457222e-07, 4.8802742243232844e-08, 7.3011320375259876e-09],\n zs=[0.0, -0.1581148860568176, -0.02060702320552523, -0.002558840496952667, -0.0003061368667524159, -3.600111450532304e-05, -4.174376962124085e-06, -4.557462755956434e-07, -8.173481495049928e-08, -3.732477282851326e-09],\n B0=1.006541121335688,\n etabar=-0.6783912804454629,\n B2c=0.26859318908803137,\n nphi=99,\n order='r3')\n\n elif name == \"precise QA+well\":\n \"\"\"\n A fit of the near-axis model to the precise quasi-axisymmetric\n configuration from SIMSOPT with magnetic well.\n\n The fit was performed to the boozmn 
data using the script\n 20200621-01-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=2,\n rc=[1.0145598919163676, 0.2106377247598754, 0.025469267136340394, 0.0026773601516136727, 0.00021104172568911153, 7.891887175655046e-06, -8.216044358250985e-07, -2.379942694112007e-07, -2.5495108673798585e-08, 1.1679227114962395e-08, 8.961288962248274e-09],\n zs=[0.0, -0.14607192982551795, -0.021340448470388084, -0.002558983303282255, -0.0002355043952788449, -1.2752278964149462e-05, 3.673356209179739e-07, 9.261098628194352e-08, -7.976283362938471e-09, -4.4204430633540756e-08, -1.6019372369445714e-08],\n B0=1.0117071561808106,\n etabar=-0.5064143402495729,\n B2c=-0.2749140163639202,\n nphi=99,\n order='r3')\n \n elif name == \"LandremanPaul2021QH\" or name == \"precise QH\":\n \"\"\"\n A fit of the near-axis model to the quasi-helically symmetric\n configuration in Landreman & Paul, arXiv:2108.03711 (2021).\n\n The fit was performed to the boozmn data using the script\n 20211001-02-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=4,\n rc=[1.0033608429348413, 0.19993025252481125, 0.03142704185268144, 0.004672593645851904, 0.0005589954792333977, 3.298415996551805e-05, -7.337736061708705e-06, -2.8829857667619663e-06, -4.51059545517434e-07],\n zs=[0.0, 0.1788824025525348, 0.028597666614604524, 0.004302393796260442, 0.0005283708386982674, 3.5146899855826326e-05, -5.907671188908183e-06, -2.3945326611145963e-06, -6.87509350019021e-07],\n B0=1.003244143729638,\n etabar=-1.5002839921360023,\n B2c=0.37896407142157423,\n nphi=99,\n order='r3')\n\n elif name == \"precise QH+well\":\n \"\"\"\n A fit of the near-axis model to the precise quasi-helically symmetric\n configuration from SIMSOPT with magnetic well.\n\n The fit was performed to the boozmn data using the script\n 20211001-02-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=4,\n rc=[1.000474932581454, 0.16345392520298313, 0.02176330066615466, 0.0023779201451133163, 0.00014141976024376502, -1.0595894482659743e-05, -2.9989267970578764e-06, 3.464574408947338e-08],\n zs=[0.0, 0.12501739099323073, 0.019051257169780858, 0.0023674771227236587, 0.0001865909743321566, -2.2659053455802824e-06, -2.368335337174369e-06, -1.8521248561490157e-08],\n B0=0.999440074325872,\n etabar=-1.2115187546668142,\n B2c=0.6916862277166693,\n nphi=99,\n order='r3')\n \n else:\n raise ValueError('Unrecognized configuration name')\n\n return cls(**kwargs)\n\n @classmethod\n def from_cxx(cls, filename):\n \"\"\"\n Load a configuration from a ``qsc_out.<extension>.nc`` output file\n that was generated by the C++ version of QSC. Almost all the\n data will be taken from the output file, over-writing any\n calculations done in python when the new Qsc object is\n created.\n \"\"\"\n def to_string(nc_str):\n \"\"\" Convert a string from the netcdf binary format to a python string. 
\"\"\"\n temp = [c.decode('UTF-8') for c in nc_str]\n return (''.join(temp)).strip()\n \n f = netcdf.netcdf_file(filename, mmap=False)\n nfp = f.variables['nfp'][()]\n nphi = f.variables['nphi'][()]\n rc = f.variables['R0c'][()]\n rs = f.variables['R0s'][()]\n zc = f.variables['Z0c'][()]\n zs = f.variables['Z0s'][()]\n I2 = f.variables['I2'][()]\n B0 = f.variables['B0'][()]\n spsi = f.variables['spsi'][()]\n sG = f.variables['sG'][()]\n etabar = f.variables['eta_bar'][()]\n sigma0 = f.variables['sigma0'][()]\n order_r_option = to_string(f.variables['order_r_option'][()])\n if order_r_option == 'r2.1':\n order_r_option = 'r3'\n if order_r_option == 'r1':\n p2 = 0.0\n B2c = 0.0\n B2s = 0.0\n else:\n p2 = f.variables['p2'][()]\n B2c = f.variables['B2c'][()]\n B2s = f.variables['B2s'][()]\n\n q = cls(nfp=nfp, nphi=nphi, rc=rc, rs=rs, zc=zc, zs=zs,\n B0=B0, sG=sG, spsi=spsi,\n etabar=etabar, sigma0=sigma0, I2=I2, p2=p2, B2c=B2c, B2s=B2s, order=order_r_option)\n \n def read(name, cxx_name=None):\n if cxx_name is None: cxx_name = name\n setattr(q, name, f.variables[cxx_name][()])\n\n [read(v) for v in ['R0', 'Z0', 'R0p', 'Z0p', 'R0pp', 'Z0pp', 'R0ppp', 'Z0ppp',\n 'sigma', 'curvature', 'torsion', 'X1c', 'Y1c', 'Y1s', 'elongation']]\n if order_r_option != 'r1':\n [read(v) for v in ['X20', 'X2c', 'X2s', 'Y20', 'Y2c', 'Y2s', 'Z20', 'Z2c', 'Z2s', 'B20']]\n if order_r_option != 'r2':\n [read(v) for v in ['X3c1', 'Y3c1', 'Y3s1']]\n \n f.close()\n return q\n \n def min_R0_penalty(self):\n \"\"\"\n This function can be used in optimization to penalize situations\n in which min(R0) < min_R0_constraint.\n \"\"\"\n return np.max((0, self.min_R0_threshold - self.min_R0)) ** 2\n \n" ]
[ [ "numpy.zeros", "numpy.mod", "numpy.max", "numpy.min", "numpy.array", "scipy.io.netcdf.netcdf_file" ] ]
TharinduRusira/tvm
[ "b076cad542524cb3744149d953c341b5815f6474" ]
[ "topi/tests/python_cpp/test_topi_reorg.py" ]
[ "\"\"\"Test code for reorg\"\"\"\nimport logging\nimport numpy as np\nimport tvm\nimport topi\nimport topi.testing\nfrom topi.util import get_const_tuple\n\ndef verify_reorg(batch, in_size, in_channel, stride):\n '''Verify reorg operator by comparing outputs from tvm and numpy implementation'''\n in_height = in_width = in_size\n\n A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')\n B = topi.cpp.vision.reorg(A, stride)\n\n a_shape = get_const_tuple(A.shape)\n dtype = A.dtype\n\n def get_ref_data_reorg():\n '''Randomly initialize the data variables and get refernce output for the reorg operation'''\n a_np = np.random.uniform(size=a_shape).astype(dtype)\n b_np = topi.testing.reorg_python(a_np, stride)\n return a_np, b_np\n\n a_np, b_np = get_ref_data_reorg()\n def check_device(device):\n '''Check the device is available and if so, build and run the program'''\n if not tvm.module.enabled(device):\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n target = topi.cpp.TEST_create_target(device)\n if device == \"llvm\":\n s = topi.cpp.generic.default_schedule(target, [B], False)\n else:\n s = topi.cpp.cuda.schedule_injective(target, [B])\n ctx = tvm.context(device, 0)\n a = tvm.nd.array(a_np, ctx)\n b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)\n func = tvm.build(s, [A, B], device, name=\"reorg\")\n func(a, b)\n tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)\n\n for device in ['cuda', 'opencl', 'metal', 'rocm', 'llvm', 'vulkan']:\n check_device(device)\n\ndef test_reorg():\n verify_reorg(1, 38, 64, 2)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n test_reorg()\n" ]
[ [ "numpy.random.uniform" ] ]
thunderball7cd/night-to-day-image-processing
[ "388a082241af4bc67e770c76f207b58b330063d9" ]
[ "src/denoising/torch_utils.py" ]
[ "\"\"\"\n## CycleISP: Real Image Restoration Via Improved Data Synthesis\n## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao\n## CVPR 2020\n## https://arxiv.org/abs/2003.07761\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport torch\n\n\ndef load_checkpoint(model, weights):\n checkpoint = torch.load(weights, map_location='cpu')\n try:\n model.load_state_dict(checkpoint[\"state_dict\"])\n except:\n state_dict = checkpoint[\"state_dict\"]\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)" ]
[ [ "torch.load" ] ]
wakataw/ipython-dawet-sql
[ "af17db523bfcee236e2bae7cc36995e5b41f6c36" ]
[ "dawetsql/odbc_sql.py" ]
[ "import logging\nimport pypyodbc\nimport sys\n\nfrom pandas import DataFrame, read_sql, concat\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import magics_class, Magics, line_magic, cell_magic\nfrom dawetsql.widgets import SchemaExplorer\nfrom . import utils\nfrom cryptography.fernet import Fernet\n\n\n@magics_class\nclass OdbcSqlMagics(Magics):\n conn = None\n chunksize = 500\n reconnect = False\n max_retry = 3\n retry = 0\n __user = None\n __password = None\n __dsn = None\n __conn_string = None\n\n def __init__(self, *args, **kwargs):\n super(OdbcSqlMagics, self).__init__(*args, **kwargs)\n\n def __connect(self, dsn, username, password, connection_string, verbose=True):\n \"\"\"\n Open database connection\n :param dsn: ODBC DSN\n :return:\n \"\"\"\n try:\n if connection_string:\n self.conn = pypyodbc.connect(connection_string)\n else:\n self.conn = pypyodbc.connect(\"DSN={};Username={};Password={}\".format(dsn, username, password))\n if self.conn and verbose:\n print(\"Connected to {}\".format(dsn))\n except Exception as e:\n logging.error(e)\n return\n\n @line_magic('dawetsql')\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-u', '--user', type=str, help=\"Dawet User\")\n @magic_arguments.argument('-p', '--password', type=str, help=\"Dawet Password\")\n @magic_arguments.argument('-d', '--dsn', type=str, help=\"Dawet DSN\")\n @magic_arguments.argument('-x', '--connection', type=str, help=\"ODBC Connection String\")\n @magic_arguments.argument('-c', '--chunksize', type=int, default=100, help=\"ODBC Fetch size\")\n @magic_arguments.argument('-a', '--reconnect', action='store_true', help='Auto Reconnect')\n @magic_arguments.argument('-r', '--retry', type=int, default=3, help='Max Retry')\n def odbc_connect(self, arg):\n \"\"\"\n Open Database Connection line magic method\n :param arg: ODBC DSN\n :return:\n \"\"\"\n if self.conn:\n self.odbc_disconnect()\n\n args = magic_arguments.parse_argstring(self.odbc_connect, arg)\n\n self.chunksize = args.chunksize\n self.max_retry = args.retry\n\n if args.reconnect:\n self.reconnect = True\n self.chipper = self.generate_chipper()\n self.__dsn = args.dsn\n self.__user = args.user\n \n if args.password:\n self.__password = self.chipper.encrypt(args.password.encode('utf8'))\n\n if args.connection:\n self.__conn_string = self.chipper.encrypt(args.connection.encode('utf8'))\n else:\n self.__conn_string = False\n\n return self.__connect(args.dsn, args.user, args.password, args.connection)\n\n @line_magic('dawetsqlclose')\n def odbc_disconnect(self, *args, **kwargs):\n \"\"\"\n Close Database Connection line magic method\n :return:\n \"\"\"\n try:\n self.conn.close()\n print(\"Disconnected\")\n except:\n pass\n finally:\n self.conn = None\n return\n\n @line_magic('dawetsqlreconnect')\n def odbc_reconnect(self, args=None, cell=None):\n if not self.reconnect:\n logging.error(\"You did not use reconnect arguments, try re initialize dawetsql with -a/--reconnect argument\")\n return\n\n self.odbc_disconnect()\n\n if self.__conn_string:\n connection_string = self.chipper.decrypt(self.__conn_string).decode('utf8')\n else:\n connection_string = False\n \n if self.__password:\n password = self.chipper.decrypt(self.__password).decode('utf8')\n else:\n password = None\n\n return self.__connect(self.__dsn, self.__user, password, connection_string, verbose=False)\n\n @cell_magic('dawetsql')\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-l', '--limit', type=int, default=10, help=\"Set result limit\")\n 
@magic_arguments.argument('-o', '--ouput', default='_', type=str, help=\"File or Variable name for results data\")\n def odbc_sql(self, arg, cell=None):\n \"\"\"\n Run SQL Query\n :param arg: optional argument\n :param cell: SQL Query string\n :return:\n \"\"\"\n args = magic_arguments.parse_argstring(self.odbc_sql, arg)\n varname = args.ouput.strip()\n\n ok, valid_name = utils.validate_name(varname)\n query = ' '.join(cell.strip().split())\n\n if not ok:\n logging.error(\"Cannot proceed with `{}` as output name\".format(varname))\n return\n\n if not self.conn:\n logging.error(\n \"Please open connection first using %dawetsql line magic\")\n return\n\n if valid_name != '_':\n if valid_name.lower().endswith('.csv'):\n self.to_csv(query, valid_name)\n return\n elif valid_name.lower().endswith('.pkl'):\n self.to_pickle(query, valid_name)\n return\n else:\n self.to_dataframe(query, valid_name, download=True)\n return\n\n return self.to_dataframe(utils.limit_query(query, args.limit), valid_name)\n\n def download(self, query):\n utils.log_query(self.__user, query)\n data = None\n try:\n data = read_sql(query, self.conn, chunksize=self.chunksize)\n except Exception as e:\n logging.error(e.__class__.__name__)\n logging.error(e)\n\n if utils.teiid_resource_exception.findall(str(e)) and self.reconnect:\n if self.retry >= self.max_retry:\n self.retry = 0\n raise Exception('Max Retry Exception')\n\n self.retry += 1\n self.odbc_reconnect()\n return self.download(query)\n else:\n raise e\n\n return data\n\n def get_dataframe(self, query, verbose=True):\n \"\"\"\n Store query result to dataframe\n :param query: SQL Query\n :return: pandas dataframe\n :verbose: print process to stdout\n \"\"\"\n print(\"Fetching result\", flush=True) if verbose else None\n\n result = self.download(query)\n\n if result is None:\n return\n\n total = 0\n df_list = []\n\n for chunk in result:\n df_list.append(chunk)\n total += len(chunk)\n self.print_process(total) if verbose else None\n\n if df_list:\n df = concat(df_list, ignore_index=True)\n return df\n\n return DataFrame()\n\n def to_csv(self, query, filename):\n \"\"\"\n Export query result to csv\n :param query: SQL Query\n :param filename: csv filename\n :return:\n \"\"\"\n result = self.download(query)\n\n if result is None:\n return\n\n total = 0\n header = True\n\n for chunk in result:\n if header:\n mode = 'w'\n else:\n mode = 'a'\n chunk.to_csv(filename, index=False, mode=mode, header=header)\n total += len(chunk)\n self.print_process(total)\n header = False\n\n def to_dataframe(self, query, varname, download=False):\n \"\"\"\n Store dataframe to shell variable\n :param query: SQL query\n :param varname: Dataframe variable name\n :param download: Download or just preview query result\n :return:\n \"\"\"\n df = self.get_dataframe(query)\n\n if df is None:\n return\n\n self.shell.user_ns[varname] = df\n if not download:\n return df\n\n def to_pickle(self, query, pickle_name):\n \"\"\"\n Export query result to python pickle\n :param query: SQL Query\n :param pickle_name: pickle file name\n :return:\n \"\"\"\n df = self.get_dataframe(query)\n\n if df is None:\n return\n\n df.to_pickle(pickle_name)\n\n @line_magic('explorer')\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-f', '--force', action='store_true', help=\"Force explorer to re-index schema\")\n def explore_schema(self, arg):\n \"\"\"\n Display schema explorer widgets\n :return:\n \"\"\"\n args = magic_arguments.parse_argstring(self.explore_schema, arg)\n\n print('Fetching schema 
detail..')\n\n explorer = SchemaExplorer(self)\n explorer.show(force=args.force)\n\n @staticmethod\n def generate_chipper():\n return Fernet(Fernet.generate_key())\n\n @staticmethod\n def print_process(total):\n sys.stdout.write(\"\\rTotal {} row(s) downloaded\".format(total))\n sys.stdout.flush()\n\n def __del__(self):\n if self.conn:\n self.conn.close()\n\n self.conn = None\n" ]
[ [ "pandas.DataFrame", "pandas.concat", "pandas.read_sql" ] ]
Lumonk/CNNs.PyTorch
[ "d634fb63c86bc135e7be79102983045696bdaed4" ]
[ "modules/dfx_modules.py" ]
[ "from __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .dfx import quant, quant_grad, quant_fb\n\n__all__ = ['QLinear', 'QConv2d', 'QBatchNorm2d']\n\nclass QLinear(nn.Linear):\n\n def __init__(self, in_features, out_features, bias=True, weight_copy=True):\n super(QLinear, self).__init__(in_features, out_features, bias=bias)\n\n\n # params define\n self.num_bits = 16\n self.add_noise = False \n\n self.weight_params = (self.num_bits, self.num_bits, self.add_noise)\n self.act_params = (self.num_bits, self.num_bits, self.add_noise)\n self.weight_copy = weight_copy\n\n def forward(self, input):\n\n\n if self.weight_copy:\n weight = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n bias = self.bias \n else:\n bias = None\n output = F.linear(input, weight, bias)\n else:\n self.weight.data = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n self.bias.data = quant_fb(self.bias, *(self.bias_params))\n else:\n self.bias = None\n\n output = F.linear(input, self.weight, self.bias)\n\n output = quant_fb(output, *(self.act_params))\n\n return output\n\n def extra_repr(self):\n s = 'DFX_Linear, in_features={in_features}, out_features={out_features}, '\n s += 'dfx = {{ {num_bits}, add_noise = {add_noise} }}'\n if self.bias is not None:\n s += ', bias=True'\n return s.format(**self.__dict__)\n\n\nclass QConv2d(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,\n weight_copy=True):\n super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n # params define\n self.fw_bits = 8\n self.bw_bits = 8\n self.add_noise = False\n\n self.weight_params = (self.fw_bits, self.bw_bits, self.add_noise)\n self.act_params = (self.fw_bits, self.bw_bits, self.add_noise)\n self.weight_copy = weight_copy\n\n def forward(self, input):\n\n if self.weight_copy:\n weight = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n bias = self.bias\n else:\n bias = None\n output = F.conv2d(input, weight, bias, self.stride,\n self.padding, self.dilation, self.groups)\n else:\n self.weight.data = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n self.bias.data = quant_fb(self.bias, *(self.bias_params))\n else:\n self.bias = None\n\n output = F.conv2d(input, self.weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n output = quant_fb(output, *(self.act_params))\n return output\n\n def extra_repr(self):\n s = ('DFX_CONV, {in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}, dfx = {{ {fw_bits}, {bw_bits}, add_noise={add_noise}}}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.output_padding != (0,) * len(self.output_padding):\n s += ', output_padding={output_padding}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n return s.format(**self.__dict__)\n\n\nclass QBatchNorm2d(nn.Module):\n\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,\n track_running_stats=True, weight_copy=True):\n super(QBatchNorm2d, self).__init__()\n # quantization parameters:\n\n self.num_features = num_features\n self.eps = eps\n self.momentum = momentum\n self.affine = affine\n self.track_running_stats = 
track_running_stats\n\n # params define\n self.num_bits = 8\n self.add_noise = False\n self.weight_params = (self.num_bits, self.num_bits, self.add_noise)\n self.act_params = (self.num_bits, self.num_bits, self.add_noise)\n self.weight_copy = weight_copy\n\n\n if self.affine:\n self.bias = nn.Parameter(torch.Tensor(num_features))\n self.weight = nn.Parameter(torch.Tensor(num_features))\n else:\n self.register_parameter('weights', None)\n self.register_parameter('bias', None)\n\n if self.track_running_stats:\n self.register_buffer('running_mean', torch.zeros(num_features))\n self.register_buffer('running_var', torch.ones(num_features))\n self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))\n else:\n self.register_parameter('running_mean', None)\n self.register_parameter('running_var', None)\n self.register_buffer('num_batches_tracked', None)\n self.reset_parameters()\n\n\n def reset_running_stats(self):\n if self.track_running_stats:\n self.running_mean.zero_()\n self.running_var.fill_(1)\n self.num_batches_tracked.zero_()\n\n def reset_parameters(self):\n self.reset_running_stats()\n if self.affine:\n self.weight.data.uniform_()\n self.bias.data.zero_()\n\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(input.dim()))\n\n def forward(self, input):\n self._check_input_dim(input)\n\n if self.momentum is None:\n exponential_average_factor = 0.0\n else:\n exponential_average_factor = self.momentum\n\n if self.training and self.track_running_stats:\n # TODO: if statement only here to tell the jit to skip emitting this when it is None\n if self.num_batches_tracked is not None:\n self.num_batches_tracked += 1\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n\n if self.weight_copy:\n # weight = quant_fb(self.weight, *(self.weight_params))\n weight = self.weight\n bias = self.bias \n\n out = F.batch_norm(\n input, self.running_mean, self.running_var, weight, bias,\n self.training or not self.track_running_stats,\n exponential_average_factor, self.eps)\n else:\n\n self.weight.data = quant_fb(self.weight, *(self.weight_params))\n self.bias.data = quant_fb(self.bias, *(self.bias_params))\n\n out = F.batch_norm(\n input, self.running_mean, self.running_var, self.weight, self.bias,\n self.training or not self.track_running_stats,\n exponential_average_factor, self.eps)\n\n out = quant_fb(out, *(self.act_params))\n\n return out\n\n def extra_repr(self):\n s = 'DFX_BN, {num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \\\n 'track_running_stata={track_running_stats}, '\n s += ' dfx = {{ {num_bits}, add_noise={add_noise} }}'\n return s.format(**self.__dict__)\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n version = local_metadata.get('version', None)\n\n if (version is None or version < 2) and self.track_running_stats:\n # at version 2: added num_batches_tracked buffer\n # this should have a default value of 0\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key not in state_dict:\n state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)\n\n super(QBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n" ]
[ [ "torch.ones", "torch.nn.functional.linear", "torch.nn.functional.conv2d", "torch.tensor", "torch.nn.functional.batch_norm", "torch.zeros", "torch.Tensor" ] ]
VT-ASIM-LAB/autoware.ai
[ "211dff3bee2d2782cb10444272c5d98d1f30d33a" ]
[ "jsk_recognition/jsk_recognition_utils/python/jsk_recognition_utils/depth.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom skimage.segmentation import slic\nfrom skimage.feature import peak_local_max\nfrom skimage.morphology import binary_closing\n\nfrom jsk_recognition_utils.mask import descent_closing\n\n\ndef split_fore_background(depth_img, footprint=None):\n if footprint is None:\n footprint = np.ones((3, 3))\n segments = slic(depth_img)\n\n local_maxi = peak_local_max(\n depth_img, labels=segments, footprint=footprint, indices=False)\n\n fg_mask = descent_closing(local_maxi, init_selem=np.ones((3, 3)), n_times=6)\n bg_mask = ~fg_mask\n return fg_mask, bg_mask\n" ]
[ [ "numpy.ones" ] ]
565353780/railway-fault-detect
[ "56c5df835d21efeb4e09111282d251c80eaa6ca0" ]
[ "src/Python/lapnet/test_line.py" ]
[ "import torch\nimport numpy as np\nimport os\n\nimport cv2\n\nfrom LapNet import LAPNet\nfrom create_dataset import createDataset\nfrom torch.nn import DataParallel\nfrom collections import OrderedDict\nfrom torch.nn.parameter import Parameter\nimport json\nimport base64\nimport numpy as np\n\nfrom flask import Flask, request, Response\napp = Flask(__name__)\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nModelName = \"LapNet_chkpt_better_epoch3978_GPU0_line.pth\"\n# ModelName = \"LapNet_chkpt_better_epoch1890_GPU0_chLi_line.pth\"\nDetectMode = \"line\"\nPort = \"9360\"\n\nclass LapNet_Test:\n def __init__(self, model_name, detect_mode):\n # torch.cuda.set_device(args.gpu_idx)\n torch.cuda.set_device(0)\n\n # self.INPUT_CHANNELS = 3\n # self.OUTPUT_CHANNELS = 2\n # self.LEARNING_RATE = args.lr #1e-5\n # self.BATCH_SIZE = args.batch_size #20\n # self.NUM_EPOCHS = args.epoch #100\n # self.LOG_INTERVAL = 20\n # self.INS_CH = 32\n # self.SIZE = [args.img_size[0], args.img_size[1]] #[224, 224]\n # self.NUM_WORKERS = args.num_workers #20\n\n self.INPUT_CHANNELS = 3\n self.OUTPUT_CHANNELS = 2\n self.LEARNING_RATE = 3e-4\n self.BATCH_SIZE = 32\n self.NUM_EPOCHS = 10000000000000\n self.LOG_INTERVAL = 20\n self.INS_CH = 32\n self.SIZE = [1024,512]\n self.NUM_WORKERS = 32\n\n self.model_name = model_name\n self.detect_mode = detect_mode\n\n self.root_path = '../../../thirdparty/lapnet-gpu'\n\n self.model = LAPNet(input_ch=self.INPUT_CHANNELS, output_ch=self.OUTPUT_CHANNELS,internal_ch = 8).cuda()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.LEARNING_RATE, betas=(0.9, 0.99), amsgrad=True)\n\n chkpt_filename = self.root_path + '/trained_model/'+ self.model_name\n\n if not os.path.exists(self.root_path + '/trained_model'):\n os.mkdir(self.root_path + '/trained_model')\n if os.path.isfile(chkpt_filename):\n checkpoint = torch.load(chkpt_filename)\n self.start_epoch = checkpoint['epoch']\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.load_state_dict(checkpoint['net'])\n self.load_state_dict(self.model, self.state_dict(self.model))\n\n def state_dict(self, model, destination=None, prefix='', keep_vars=False):\n own_state = model.module if isinstance(model, torch.nn.DataParallel) \\\n else model\n if destination is None:\n destination = OrderedDict()\n for name, param in own_state._parameters.items():\n if param is not None:\n destination[prefix + name] = param if keep_vars else param.data\n for name, buf in own_state._buffers.items():\n if buf is not None:\n destination[prefix + name] = buf\n for name, module in own_state._modules.items():\n if module is not None:\n self.state_dict(module, destination, prefix + name + '.', keep_vars=keep_vars)\n return destination\n\n def load_state_dict(self, model, state_dict, strict=True):\n own_state = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) \\\n else model.state_dict()\n for name, param in state_dict.items():\n if name in own_state:\n if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n try:\n own_state[name].copy_(param)\n except Exception:\n raise RuntimeError('While copying the parameter named {}, '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}.'\n .format(name, own_state[name].size(), param.size()))\n elif strict:\n raise KeyError('unexpected key \"{}\" in state_dict'\n .format(name))\n if strict:\n missing = set(own_state.keys()) - set(state_dict.keys())\n if len(missing) > 0:\n raise 
KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n\nlapnet_test = LapNet_Test(ModelName, DetectMode)\nlapnet_test.model.eval()\n\[email protected](\"/predict\", methods=[\"POST\"])\ndef predict():\n data= request.get_data()\n data_json = json.loads(data)\n\n img_b64encode = bytes(data_json[\"Image\"], encoding=\"utf-8\")\n \n img_b64decode = base64.b64decode(img_b64encode)\n\n img_array = np.frombuffer(img_b64decode, np.uint8)\n image = cv2.imdecode(img_array, cv2.COLOR_BGR2RGB)\n\n train_dataset = createDataset(\"\", size=lapnet_test.SIZE, image=image)\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,\n shuffle=False, num_workers=0)\n \n img = list(enumerate(train_dataloader))[0][1]\n\n img_tensor = torch.tensor(img).cuda()\n\n sem_pred = lapnet_test.model(img_tensor)\n\n seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()\n\n seg_show = seg_map[1]\n\n _, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)\n seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)\n seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)\n result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)\n\n output_img_array = cv2.imencode(\".jpg\", result_img)[1]\n\n output_img_b64encode = str(base64.b64encode(output_img_array))[2:-1]\n\n image_output_json = {}\n\n image_output_json[\"OutputImage\"] = output_img_b64encode\n\n return image_output_json\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=Port,debug=True)\n" ]
[ [ "torch.utils.data.DataLoader", "torch.load", "torch.tensor", "torch.squeeze", "numpy.frombuffer", "torch.cuda.set_device" ] ]
undeadinu/dldt
[ "fbc7a4a710c24def8ab199926a7da90a0394b87d" ]
[ "model-optimizer/mo/front/common/partial_infer/matmul.py" ]
[ "\"\"\"\n Copyright (c) 2018 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\n\nimport numpy as np\n\nfrom mo.utils.error import Error\n\n\ndef tf_matmul_infer(node):\n assert (len(node.in_nodes()) == 2)\n shapes = [node.in_node(i).shape for i in range(2)]\n log.debug('matmul shapes: {}'.format(shapes))\n if node.transpose_a or node.transpose_b or any(s is None or len(s) < 2 for s in shapes):\n log.error(\"MatMul wasn't able to infer shape\")\n return\n if any(shapes[0][:-2] != shapes[1][:-2]) or shapes[0][-1] != shapes[1][-2]:\n log.error(\"MatMul wasn't able to infer shape because input dimensions are not compatible\")\n return\n if any(shapes[0][1:-1] != 1):\n log.error(\"MatMul wasn't able to infer shapes because input[0] shape is invalid: {}\".format(shapes[0]))\n return\n\n shape_tuple = (np.array([shapes[0][0]], dtype=np.int64), np.array([shapes[1][-1]], dtype=np.int64))\n if len(shapes[0]) > 2:\n # TODO Investigate case when MatMul have inputs with not matching output dimensions\n # It looks to be a practical case and if we add outer dimensions of the first argument\n # it will lead to incorrect model sometimes. TF documentation is unclear.\n log.warning('Ignored outer dimensions of input tensor for MatMul node: {}'.format(node.name))\n # shape_tuple = (shapes[0][:-2], *shape_tuple)\n\n log.debug('shape_tuple: {}'.format(shape_tuple))\n node.out_node().shape = np.concatenate(shape_tuple)\n node['channel_dims'] = node.out_node().shape.size - 1\n log.debug('matmul shape: {}'.format(node.out_node().shape))\n\n\n\ndef onnx_gemm_infer(node):\n assert (len(node.in_nodes()) == 3)\n shapeA = node.in_node(0).shape\n shapeB = node.in_node(1).shape\n shapeC = node.in_node(2).shape\n\n assert shapeA.size >= 2 and shapeB.size == 2 and shapeC.size in [1, 2]\n\n if shapeA.size > 2 and node.transpose_a:\n raise Error(\n 'ONNX Gemm operation do not support {}dimensional input with set transA key'.format(shapeA.size))\n\n # apply transposes and broadcasts\n if node.transpose_a:\n shapeA = shapeA[[1,0]]\n if node.transpose_b:\n shapeB = shapeB[[1,0]]\n if node.broadcast_c and shapeC.size == 1:\n shapeC = np.array([shapeA[0], shapeC[0]])\n\n node.out_node().shape = shapeC\n return\n\n" ]
[ [ "numpy.array", "numpy.concatenate" ] ]
lsoffi/EsperienzeDiLaboratorioDiCalcolo201920
[ "7a2a821b37cc8dfca527e9afb639a86a8e6c759b" ]
[ "esercitazioni/ex3.py" ]
[ "#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.title('Un primo plot con Python')\nplt.xlabel('x')\nplt.ylabel('y')\nx = np.linspace(0.0, 5.0, 100)\ny = x\nplt.plot(x,y,label='y=x')\nx, y = np.loadtxt('temp.dat', usecols=(0,2), delimiter=' ', unpack=True)\nplt.plot(x,y, 'x',label='Loaded from file!')\nplt.savefig('traiettoria.png')\nplt.show()\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.title", "matplotlib.pyplot.show", "matplotlib.pyplot.ylabel", "matplotlib.pyplot.plot", "numpy.linspace", "matplotlib.pyplot.xlabel", "numpy.loadtxt" ] ]
Rockysed/PSC_classification
[ "1815b673ac9374d9d2abd08ba0f1f43597316dee" ]
[ "code/plotting_psc.py" ]
[ "# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 23 10:48:58 2019\r\n\r\n@author: rocco\r\n\"\"\"\r\nimport cartopy.crs as ccrs\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport os\r\n\r\n\"\"\"\r\nDefinition function to plot psc, df is a pandas dataframe, i = 0 if Northern Emisphere, i = 1 if Southern Emisphere\r\ntitle, classifier_type = [labels_bc, labels_svm_pc]\r\n\"\"\"\r\ndef plot_psc(df, i, title, classifier_type):\r\n if i == 1:\r\n ax = plt.axes(projection=ccrs.Orthographic(0, -90))\r\n else:\r\n ax = plt.axes(projection=ccrs.Orthographic(0, 90))\r\n ax.coastlines(resolution='10m')\r\n ax.gridlines() \r\n if classifier_type == \"labels_bc\":\r\n markers = ['s', 's', '^', 'o', 'D', 'v', '>']\r\n colors = ['w', 'b', 'r', 'chartreuse', 'cyan', 'goldenrod', 'steelblue']\r\n edges = ['k', 'b', 'r', 'chartreuse', 'cyan', 'goldenrod', 'steelblue']\r\n labels = ['unspec.', 'ICE', 'NAT', 'STSmix', 'ICE_NAT', \"NAT_STS\", \"ICE_STS\"]\r\n for j in range (7):\r\n plt.scatter(df[df[\"labels_bc\"]==j][\"lon\"] , df[df[\"labels_bc\"]==j][\"lat\"], c = colors[j], s=40 \\\r\n , marker=markers[j], transform=ccrs.Geodetic(), label=labels[j], edgecolors=edges[j])\r\n else:\r\n markers = ['s', '^', 'o', 'D', 'v']\r\n colors = ['b', 'r', 'chartreuse', 'chartreuse', 'chartreuse']\r\n labels = ['ICE', 'NAT', 'STS_1', 'STS_2', 'STS_3']\r\n for j in range (0, 5):\r\n plt.scatter(df[df[classifier_type]==j+1][\"lon\"] , df[df[classifier_type]==j+1][\"lat\"], c = colors[j], s=40 \\\r\n , marker=markers[j], transform=ccrs.Geodetic(), label=labels[j])\r\n \r\n\r\n if i == 1:\r\n ax.set_extent([-180, 180, -60, -90], crs=ccrs.PlateCarree())\r\n else:\r\n ax.set_extent([-180, 180, 60, 90], crs=ccrs.PlateCarree()) \r\n plt.plot()\r\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n plt.title(title)\r\n \r\n#dates to be considered \r\n\"\"\"\r\ndates = [\"2003-05-23\", \"2003-06-05\", \"2003-06-09\", \"2003-06-11\", \"2003-06-12\", \"2003-06-15\", \"2008-05-28\" \\\r\n , \"2008-05-29\", \"2008-05-30\", \"2008-05-31\", \"2008-06-01\", \"2008-06-02\", \"2007-01-25\", \"2011-01-07\" \\\r\n , \"2007-07-08\", \"2008-07-25\", \"2008-08-29\"]\r\n\"\"\"\r\n\"\"\"\r\ndates = [\"2003-05-23\", \"2003-06-05\", \"2003-06-09\", \"2003-06-11\", \"2003-06-12\", \"2003-06-15\", \"2008-05-28\" \\\r\n , \"2008-05-29\", \"2008-05-30\", \"2008-05-31\", \"2008-06-01\", \"2008-06-02\", \"2007-01-25\", \"2011-01-07\" \\\r\n , \"2007-07-08\", \"2008-07-25\", \"2008-08-29\"]\r\n\"\"\"\r\ndates = [\"2009-05-23\", \"2009-06-14\", \"2009-06-24\", \"2009-07-24\", \"2009-08-26\"]\r\n#Minumum and maximum tangent height\r\n#classifier type\r\n#classifier_type = \"labels_svm_kpca_red\"\r\n#classifier_type = \"labels_svm_lk_pc_rf_scale_v1\"\r\nclassifier_type = \"labels_bc\"\r\nif classifier_type == \"labels_svm_lk_pc_rf_scale_v1\":\r\n cl_type = \"SVM (RF + PCA)\"\r\nif classifier_type == \"labels_svm_kpca\":\r\n cl_type = \"SVM (KPCA)\" \r\nif classifier_type == \"labels_svm_auto\":\r\n cl_type = \"SVM (autoenc)\" \r\nif classifier_type == \"labels_bc\":\r\n cl_type = \"Bayesian cl.\"\r\n#cycle on dates\r\n\r\n#df = (df_may_2003[(df_may_2003['htang'] > htang_min) & (df_may_2003['htang'] < htang_max)]).loc[dates[0]]\r\n\r\nfor date in dates:\r\n year = date.split(\"-\")[0]\r\n month = date.split(\"-\")[1]\r\n day = date.split(\"-\")[2]\r\n c_date = year + month + day\r\n df = pd.read_hdf( '../data/mipas_pd/' + year + '_' + month + '_prova.h5','df_reduced', where=['index == c_date'])\r\n bins = ([(14, 16), (16, 
18), (18, 22), (21.2, 26.8)])\r\n for k in range(0, len(bins)):\r\n df_binned = df[(df[\"htang\"] > bins[k][0]) & (df[\"htang\"] < bins[k][1])]\r\n if df_binned.shape[0] > 0:\r\n if df_binned[\"lat\"].mean() < 0:\r\n i = 1\r\n else: i = 0\r\n title = \"PSC plot date: \" + date + \" Altitude range: \" + str(bins[k][0]) + \"-\" + str(bins[k][1]) + \" [km]\" + \"\\n using \" + cl_type \r\n plot_psc(df_binned, i, title, classifier_type)\r\n my_path = \"../progetti/test_plots_specific_days/new/\" + cl_type\r\n if not os.path.exists(my_path):\r\n os.makedirs(my_path)\r\n my_file = date + \"_v2\" + str(k) +\".png\"\r\n plt.savefig(os.path.join(my_path, my_file))\r\n plt.close()\r\n \r\n\r\n\r\n#plt.scatter(df[df[\"labels_bc\"] == 2][\"lat\"] , df[df[\"labels_bc\"] == 2][\"lon\"], marker = markers[2], s=20, color='r', transform=ccrs.Geodetic())" ]
[ [ "matplotlib.pyplot.legend", "matplotlib.pyplot.title", "pandas.read_hdf", "matplotlib.pyplot.close", "matplotlib.pyplot.plot" ] ]
AxelAllen/Multimodal-BERT-in-Medical-Image-and-Text-Classification
[ "b60bb7bd4fe07773ee3bf8edfc5011a337ac6037" ]
[ "MMBT/image.py" ]
[ "\"\"\"\nThis code is adapted from the image.py by Kiela et al. (2020) in https://github.com/facebookresearch/mmbt/blob/master/mmbt/models/image.py\nand the equivalent Huggingface implementation: utils_mmimdb.py, which can be\nfound here: https://github.com/huggingface/transformers/blob/8ea412a86faa8e9edeeb6b5c46b08def06aa03ea/examples/research_projects/mm-imdb/utils_mmimdb.py\n\nThe ImageEncoderDenseNet class is modified from the original ImageEncoder class to be based on pre-trained DenseNet\ninstead of ResNet and to be able to load saved pre-trained weights.\n\nThis class makes up the image submodule of the MMBT model.\n\nThe forward function is also modified according to the forward function of the DenseNet model listed here:\n\nOriginal forward function of DenseNet\n\ndef forward(self, x):\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out\n\"\"\"\nimport os\nimport logging\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.nn.functional as F\n\n\nlogger = logging.getLogger(__name__)\n\n# mapping number of image embeddings to AdaptiveAvgPool2d output size\nPOOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}\n\n# module assumes that the directory where the saved chexnet weight is in the same level as this module\nMMBT_DIR_PARENT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(MMBT_DIR_PARENT, \"data\")\nMODELS_DIR = os.path.join(DATA_DIR, \"models\")\nSAVED_CHEXNET = os.path.join(MODELS_DIR, \"saved_chexnet.pt\")\n\n\nclass ImageEncoderDenseNet(nn.Module):\n def __init__(self, num_image_embeds, saved_model=True, path=os.path.join(MODELS_DIR, SAVED_CHEXNET)):\n \"\"\"\n\n :type num_image_embeds: int\n :param num_image_embeds: number of image embeddings to generate; 1-9 as they map to specific numbers of pooling\n output shape in the 'POOLING_BREAKDOWN'\n :param saved_model: True to load saved pre-trained model False to use torch pre-trained model\n :param path: path to the saved .pt model file\n \"\"\"\n super().__init__()\n if saved_model:\n # loading pre-trained weight, e.g. 
ChexNet\n # the model here expects the weight to be regular Tensors and NOT cuda Tensor\n model = torch.load(path)\n logger.info(f\"Saved model loaded from: {path}\")\n else:\n model = torchvision.models.densenet121(pretrained=True)\n\n # DenseNet architecture last layer is the classifier; we only want everything before that\n modules = list(model.children())[:-1]\n self.model = nn.Sequential(*modules)\n # self.model same as original DenseNet self.features part of the forward function\n self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[num_image_embeds])\n\n def forward(self, input_modal):\n \"\"\"\n B = batch\n N = number of image embeddings\n 1024 DenseNet embedding size, this can be changed when instantiating MMBTconfig for modal_hidden_size\n\n Bx3x224x224 (this is input shape) -> Bx1024x7x7 (this is shape after DenseNet CNN layers before the last layer)\n -> Bx1024xN (this is after torch.flatten step in this function below) -> BxNx1024 (this is the shape of the\n output tensor)\n\n :param input_modal: image tensor\n :return:\n \"\"\"\n # Bx3x224x224 -> Bx1024x7x7 -> Bx1024xN -> BxNx1024\n features = self.model(input_modal)\n out = F.relu(features, inplace=True)\n out = self.pool(out)\n out = torch.flatten(out, start_dim=2)\n out = out.transpose(1, 2).contiguous()\n\n return out # BxNx1024" ]
[ [ "torch.load", "torch.nn.AdaptiveAvgPool2d", "torch.flatten", "torch.nn.functional.relu", "torch.nn.Sequential" ] ]
WONDER-project/GSAS-II-WONDER-OSX
[ "f90ab85f89f282d1b9686a1cbbf5adc5c48ceac9" ]
[ "GSAS-II-WONDER/SUBGROUPS.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\n*SUBGROUPS: Interface to special GSAS Bilbao SUBGROUPS & k-SUBGROUPSMAG web pages*\n-------------------------------\n\nExtraction of space subgroups for a given space group and a propagation vector\nfrom the GSAS version of SUBGROUPS & k-SUBGROUPSMAG web page on the Bilbao Crystallographic server\n\n\"\"\"\n########### SVN repository information ###################\n# $Date: 2018-07-10 11:41:00 -0500 (Tue, 10 Jul 2018) $\n# $Author: vondreele $\n# $Revision: 3465 $\n# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/kSUBGROUPSMAG.py $\n# $Id: kSUBGROUPSMAG.py 3465 2018-07-10 16:41:00Z vondreele $\n########### SVN repository information ###################\nfrom __future__ import division, print_function\nimport requests\nimport numpy as np\nimport numpy.linalg as nl\nimport GSASIIspc as G2spc\nimport GSASIIpath\nGSASIIpath.SetBinaryPath()\nsubmagSite = 'http://www.cryst.ehu.es/cgi-bin/cryst/programs/subgrmag1_general_GSAS.pl'\n\ndef GetNonStdSubgroups(SGData, kvec,star=False,landau=False,maximal=False):\n '''Run Bilboa's SUBGROUPS for a non-standard space group. \n This requires doing a post to the Bilboa site, which returns all\n subgroups of the entered space group as the text of a web page \n with a table containing the space group symbol, the \n transformation matrix and index for each subgroup.\n\n :params list kvec: propogation vector as a list of nine string fractions or blank\n :params SGData: space group object (see :ref:`Space Group object<SGData_table>`) \n\n :returns: (error,text) error: if True no error or False; where \n text containts a possible web page text\n '''\n print('''\n For use of SUBGROUPS, please cite:\n Symmetry-Based Computational Tools for Magnetic Crystallography,\n J.M. Perez-Mato, S.V. Gallego, E.S. Tasci, L. Elcoro, G. de la Flor, and M.I. Aroyo\n Annu. Rev. Mater. Res. 2015. 45,217-48.\n doi: 10.1146/annurev-matsci-070214-021008\n ''')\n \n \n def getSpGrp(item):\n return item.replace('<i>','').replace('</i>','').replace('<sub>','').replace('</sub>','')\n \n def getMatVec(item):\n return item.replace('{','[').replace('}',']')\n \n starmag = 'no'\n if star:\n starmag = 'yes'\n land = 'no'\n if landau:\n land = 'yes'\n celtodas = 'no'\n limite = 'spgroup'\n if maximal:\n limite = 'maximal'\n postdict = {'centrosymmetry':'0','crystalsystem':'0','landau':land,\n 'eleccion':'subgrmag1_k','inicio':'nostandard','celtodas':celtodas,\n 'limite':limite,'list':'Submit','listado':'lista','starmagnetica':starmag,\n 'pointgroup':'0','polarity':'0','sub':'1',\n 'super':'','tipog':'gesp','wyckoffstrain':''}\n text,table = G2spc.SGPrint(SGData)\n OpList = G2spc.TextOps(text,table,reverse=True)\n# GenList = G2spc.TextGen(SGData,reverse=True)\n for item in OpList:\n item += '\\n'\n sym = \"\"\n for i in OpList:\n if sym: sym += '\\n'\n #if sym: sym += ' ' # use this for testing to generate an error in place of previous\n sym += i.lower()\n postdict['generators'] = sym\n for j in [1,2,3]:\n if kvec[3*j-3] == ' ':\n break\n for i,k in zip(('x','y','z'),kvec[3*j-3:3*j]):\n postdict['knm%d%s'%(j,i)] = k\n try:\n r = requests.post(submagSite,postdict)\n except: #ConnectionError?\n page = ''\n print('connection error - not on internet')\n return None,None\n if r.status_code == 200:\n print('request OK')\n page = r.text\n page = page.replace('<font style= \"text-decoration: overline;\">','<font>-')\n else:\n page = ''\n print('request failed. 
Reason=',r.reason)\n return None,None\n r.close()\n \n result = page.replace('&','\\n')\n result = result.split('\\n')\n SPGPs = []\n MVs = []\n baseList = []\n itemList = []\n superList = []\n altList = []\n start = 0\n for line in result: #work around bug report from Bilbao\n start += 1\n if 'yesz' in line:\n break\n for line in result[start:]:\n if 'GGG' in line:\n lines = line.split('GGG')\n line = lines[0]\n alts = []\n beg = True\n for sline in lines:\n items = sline.split('z')\n gid = int(items[0])\n if beg:\n baseList.append(gid)\n beg = False\n alts.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n SPGPs.append(getSpGrp(items[4]))\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n altList.append(alts)\n for sline in lines[1:]:\n altList.append([])\n else:\n items = line.split('z')\n gid = int(items[0])\n altList.append([gid,])\n baseList.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n SPGPs.append(getSpGrp(items[4]))\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n result = list(zip(SPGPs,MVs,itemList,altList,superList))\n return result,baseList\n\ndef GetNonStdSubgroupsmag(SGData, kvec,star=False,landau=False,maximal=False):\n '''Run Bilboa's k-Subgroupsmag for a non-standard space group. \n This requires doing a post to the Bilboa site, which returns all\n magnetic subgroups of the entered subgroup as the text of a web page \n with a table containing the BNS magnetic space group symbol, the \n transformation matrix and index for each subgroup.\n\n :params list kvec: propogation vector as a list of three numbers\n :params SGData: space group object (see :ref:`Space Group object<SGData_table>`) \n\n :returns: (error,text) error: if True no error or False; where \n text containts a possible web page text\n '''\n print('''\n For use of k-SUBGROUPSMAG, please cite:\n Symmetry-Based Computational Tools for Magnetic Crystallography,\n J.M. Perez-Mato, S.V. Gallego, E.S. Tasci, L. Elcoro, G. de la Flor, and M.I. Aroyo\n Annu. Rev. Mater. Res. 2015. 
45,217-48.\n doi: 10.1146/annurev-matsci-070214-021008\n ''')\n\n def getSpGrp(item):\n return item.replace('<i>','').replace('</i>','').replace('<sub>','').replace('</sub>','')\n \n def getBNS(item):\n spgrp = getSpGrp(item)\n bns = ''\n sid = item.find('<sub>')\n if sid == 8:\n bns = spgrp[1]\n spgrp = '%s_%s %s'%(spgrp[0],bns,spgrp[2:])\n return spgrp,bns\n \n def getMatVec(item):\n return item.replace('{','[').replace('}',']')\n \n starmag = 'no'\n if star:\n starmag = 'yes'\n land = 'no'\n if landau:\n land = 'yes'\n celtodas = 'no'\n limite = 'spgroup'\n if maximal:\n limite = 'maximal'\n postdict = {'centrosymmetry':'0','crystalsystem':'0','landau':land,\n 'eleccion':'subgrmag1_k','inicio':'nostandard','celtodas':celtodas,\n 'limite':limite,'list':'Submit','listado':'lista','starmagnetica':starmag,\n 'pointgroup':'0','polarity':'0','sub':'1.1',\n 'super':'','tipog':'gmag','wyckoffstrain':''}\n text,table = G2spc.SGPrint(SGData)\n OpList = G2spc.TextOps(text,table,reverse=True)\n# OpList = G2spc.TextGen(SGData,reverse=True)\n for item in OpList:\n item += '\\n'\n sym = \"\"\n for i in OpList:\n if sym: sym += '\\n'\n #if sym: sym += ' ' # use this for testing to generate an error in place of previous\n sym += i.lower()\n postdict['generators'] = sym\n for j in [1,2,3]:\n if kvec[3*j-3] == ' ':\n break\n for i,k in zip(('x','y','z'),kvec[3*j-3:3*j]):\n postdict['km%d%s'%(j,i)] = k\n try:\n r = requests.post(submagSite,postdict)\n except: #ConnectionError?\n page = ''\n print('connection error - not on internet')\n return None,None\n if r.status_code == 200:\n print('request OK')\n page = r.text\n page = page.replace('<font style= \"text-decoration: overline;\">','<font>-')\n else:\n page = ''\n print('request failed. Reason=',r.reason)\n return None,None\n r.close()\n\n result = page.replace('&','\\n')\n result = result.split('\\n')\n start = 0\n for line in result: #work around bug report from Bilbao\n start += 1\n if 'yesz' in line:\n break\n SPGPs = []\n BNSs = []\n MVs = []\n baseList = []\n itemList = []\n superList = []\n altList = []\n for line in result[start:]:\n if 'GGG' in line:\n lines = line.split('GGG')\n alts = []\n beg = True\n for sline in lines:\n items = sline.split('z')\n gid = int(items[0])\n if beg:\n baseList.append(gid)\n beg = False\n alts.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n spgrp,bns = getBNS(items[4])\n SPGPs.append(spgrp)\n BNSs.append(bns)\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n altList.append(alts)\n for sline in lines[1:]:\n altList.append([])\n else:\n items = line.split('z')\n gid = int(items[0])\n altList.append([gid,])\n baseList.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n spgrp,bns = getBNS(items[4])\n SPGPs.append(spgrp)\n BNSs.append(bns)\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n result = list(zip(SPGPs,BNSs,MVs,itemList,altList,superList))\n return result,baseList\n\ndef subBilbaoCheckLattice(spgNum,cell,tol=5):\n '''submit a unit cell to Bilbao PseudoLattice\n '''\n psSite = \"http://www.cryst.ehu.es/cgi-bin/cryst/programs/pseudosym/nph-pseudolattice\"\n cellstr = '+'.join(['{:.5f}'.format(i) for i in cell])\n datastr = \"sgr={:}&cell={:}&tol={:}&submit=Show\".format(\n str(int(spgNum)),cellstr,str(int(tol)))\n try:\n r = requests.post(psSite,data=datastr)\n except: #ConnectionError?\n page = ''\n print('connection error - not on internet')\n return None\n if r.status_code == 200:\n print('request OK')\n page = r.text\n page = 
page.replace('<font style= \"text-decoration: overline;\">','<font>-')\n else:\n page = ''\n print('request failed. Reason=',r.reason)\n return None\n r.close()\n return page\n\ndef parseBilbaoCheckLattice(page):\n '''find the cell options from the web page returned by Bilbao PseudoLattice\n '''\n cellopts = [i for i in page.split('<tr>') if '<td><pre>' in i]\n found = []\n for c in cellopts:\n cells = c.split(\"pre\")[1].split('<')[0].replace('>','').split('\\n') # list of cells, 1st is approx\n try:\n acell = [float(i) for i in cells[0].split()]\n xmatA = [c.split('[')[i].split(']')[0].split() for i in (1,2,3)]\n xmat = np.array([[eval(i) for i in j] for j in xmatA])\n cellmat = nl.inv(xmat).T\n except:\n print('Error processing cell in',c)\n continue\n found.append((acell,cellmat))\n return found\n\n\ndef test():\n SGData = G2spc.SpcGroup('f d -3 m')[1]\n \n print('test SUBGROUPSMAG') \n results,baseList = GetNonStdSubgroupsmag(SGData,('0','0','0',' ',' ',' ',' ',' ',' ',' '))\n if results:\n for [spgp,bns,mv,gid,altList,supList] in results:\n if gid in baseList:\n print('Space group: %d %s BNS: %s'%(gid,spgp,bns))\n print('MV',mv)\n print('altList:',altList)\n print('superList: ',supList)\n \n print('test SUBGROUPS')\n results,baseList = GetNonStdSubgroups(SGData,('1/3','1/3','1/2',' ',' ',' ',' ',' ',' ',' '))\n if results:\n for [spgp,mv,gid,altList,supList] in results:\n if gid in baseList:\n print('Space group: %d %s'%(gid,spgp))\n print('MV',mv)\n print('altList:',altList)\n print('superList: ',supList)\n \n \n\nif __name__ == '__main__':\n # run self-tests\n selftestquiet = False\n test()\n print (\"OK\")\n" ]
[ [ "numpy.linalg.inv" ] ]
ajeytiwary/sunpy
[ "6ba94b471f2a2e716f91ef8b8014adbef358aa6f" ]
[ "sunpy/cm/cm.py" ]
[ "\"\"\"\nThis module provides a set of colormaps specific for solar data.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nfrom sunpy.cm import color_tables as ct\n\n__all__ = ['get_cmap', 'show_colormaps']\n\nsdoaia94 = ct.aia_color_table(94)\nsdoaia131 = ct.aia_color_table(131)\nsdoaia171 = ct.aia_color_table(171)\nsdoaia193 = ct.aia_color_table(193)\nsdoaia211 = ct.aia_color_table(211)\nsdoaia304 = ct.aia_color_table(304)\nsdoaia335 = ct.aia_color_table(335)\nsdoaia1600 = ct.aia_color_table(1600)\nsdoaia1700 = ct.aia_color_table(1700)\nsdoaia4500 = ct.aia_color_table(4500)\n\nsohoeit171 = ct.eit_color_table(171)\nsohoeit195 = ct.eit_color_table(195)\nsohoeit284 = ct.eit_color_table(284)\nsohoeit304 = ct.eit_color_table(304)\n\nsoholasco2 = ct.lasco_color_table(2)\nsoholasco3 = ct.lasco_color_table(3)\n\nstereocor1 = ct.cor_color_table(1)\nstereocor2 = ct.cor_color_table(2)\n\nstereohi1 = ct.stereo_hi_color_table(1)\nstereohi2 = ct.stereo_hi_color_table(2)\n\nyohkohsxtal = ct.sxt_color_table('al')\nyohkohsxtwh = ct.sxt_color_table('wh')\n\nhinodexrt = ct.xrt_color_table()\nhinodesotintensity = ct.sot_color_table('intensity')\n#hinodesotstokesquv = ct.sot_color_table('stokesQUV')\n#hinodesotmagneticf = ct.sot_color_table('magnetic field')\n#hinodesotvelocity = ct.sot_color_table('velocity')\n#hinodesotwidth = ct.sot_color_table('width')\n\ntrace171 = ct.trace_color_table('171')\ntrace195 = ct.trace_color_table('195')\ntrace284 = ct.trace_color_table('284')\ntrace1216 = ct.trace_color_table('1216')\ntrace1550 = ct.trace_color_table('1550')\ntrace1600 = ct.trace_color_table('1600')\ntrace1700 = ct.trace_color_table('1700')\ntraceWL = ct.trace_color_table('WL')\n\nhmimag = ct.hmi_mag_color_table()\n\ncmlist = {\n 'sdoaia94': sdoaia94,\n 'sdoaia131': sdoaia131,\n 'sdoaia171': sdoaia171,\n 'sdoaia193': sdoaia193,\n 'sdoaia211': sdoaia211,\n 'sdoaia304': sdoaia304,\n 'sdoaia335': sdoaia335,\n 'sdoaia1600': sdoaia1600,\n 'sdoaia1700': sdoaia1700,\n 'sdoaia4500': sdoaia4500,\n 'sohoeit171': sohoeit171,\n 'sohoeit195': sohoeit195,\n 'sohoeit284': sohoeit284,\n 'sohoeit304': sohoeit304,\n 'soholasco2': soholasco2,\n 'soholasco3': soholasco3,\n 'stereocor1': stereocor1,\n 'stereocor2': stereocor2,\n 'stereohi1': stereohi1,\n 'stereohi2': stereohi2,\n 'rhessi': cm.jet, # pylint: disable=E1101\n 'yohkohsxtal': yohkohsxtal,\n 'yohkohsxtwh': yohkohsxtwh,\n 'hinodexrt': hinodexrt,\n 'hinodesotintensity': hinodesotintensity,\n #'hinodesotstokesquv': hinodesotstokesquv,\n #'hinodesotmagneticf': hinodesotmagneticf,\n #'hinodesotvelocity': hinodesotvelocity,\n #'hinodesotwidth': hinodesotwidth,\n 'trace171': trace171,\n 'trace195': trace195,\n 'trace284': trace284,\n 'trace1216': trace1216,\n 'trace1550': trace1550,\n 'trace1600': trace1600,\n 'trace1700': trace1700,\n 'traceWL': traceWL,\n 'hmimag': hmimag\n }\n\n\ndef get_cmap(name='sdoaia94'):\n \"\"\"Get a colormap.\n\n Parameters\n ----------\n name : string\n The name of a color map.\n\n Returns\n -------\n value : matplotlib colormap\n\n See Also\n --------\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> colormap = cm.get_cmap(name = 'sdoaia94')\n\n References\n ----------\n | http://matplotlib.sourceforge.net/api/cm_api.html\n\n \"\"\"\n if name in cmlist:\n return cmlist.get(name)\n else:\n raise ValueError(\"Colormap {name!s} is not recognized\".format(name=name))\n\n\ndef show_colormaps():\n \"\"\"Displays a plot of the custom color maps supported in 
SunPy.\n\n Parameters\n ----------\n None : none\n\n Returns\n -------\n None : none\n\n See Also\n --------\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> cm.show_colormaps()\n\n References\n ----------\n\n \"\"\"\n maps = sorted(cmlist)\n nmaps = len(maps) + 1\n\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n\n fig = plt.figure(figsize=(5, 10))\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n plt.axis(\"off\")\n plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')\n pos = list(ax.get_position().bounds)\n fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,\n horizontalalignment='right')\n\n plt.show()\n\n# def test_equalize(data):\n# \"\"\"Returns a color map which performs histogram equalization on the data.\n#\n# Parameters\n# ----------\n# data : ndarray\n#\n# Returns\n# -------\n# value : matplotlib colormap\n#\n# See Also\n# --------\n#\n# Examples\n# --------\n# >>> import sunpy.cm as cm\n# >>> cm.test_equalize()\n#\n# Reference\n# ---------\n# | http://matplotlib.sourceforge.net/api/cm_api.html\n#\n# .. warning:: this function is under development\n#\n# .. todo:: finish coding this function!\n#\n# \"\"\"\n# dfile = cbook.get_sample_data('s1045.ima', asfileobj=False)\n#\n# im = np.fromstring(file(dfile, 'rb').read(), np.uint16).astype(float)\n# im.shape = 256, 256\n#\n# #imshow(im, ColormapJet(256))\n# #imshow(im, cmap=cm.jet)\n#\n# imvals = np.sort(im.flatten())\n# lo = imvals[0]\n# hi = imvals[-1]\n# steps = (imvals[::len(imvals)/256] - lo) / (hi - lo)\n# num_steps = float(len(steps))\n# interps = [(s, idx/num_steps, idx/num_steps) for idx,\n# s in enumerate(steps)]\n# interps.append((1, 1, 1))\n# cdict = {'red': interps,\n# 'green': interps,\n# 'blue': interps}\n# histeq_cmap = colors.LinearSegmentedColormap('HistEq', cdict)\n# pylab.figure()\n# pylab.imshow(im, cmap=histeq_cmap)\n# pylab.title('histeq')\n# pylab.show()\n" ]
[ [ "numpy.vstack", "matplotlib.pyplot.figure", "matplotlib.pyplot.axis", "matplotlib.pyplot.subplot", "matplotlib.pyplot.show", "numpy.linspace" ] ]
csprock/bmdcluster
[ "0caf02bb8a93846aa679518ee6a839f843819eac" ]
[ "bmdcluster/optimizers/blockdiagonalBMD.py" ]
[ "import numpy as np\r\n\r\n\"\"\"\r\nThis module contains a variant of the Binary Matrix Decomposition (BMD) algorithm for clustering binary data\r\nas presented in \"A General Model for Clustering Binary Data\" (Tao Li, 2005) and \"On Clustering Binary Data\"\r\n(Tao Li & Shenghuo Zhu, 2005). This varient of the BMD algorithm is for data whose matrix is can be \r\nrearranged into block-diagonal form. That is, each set of data is associated with a set of features and vice-versa. \r\nThis module implements Algorithm 2 from Li (2005) supplemented with ideas from Li & Zhu (2005). \r\n\r\n\r\nGeneral Nomenclature:\r\n\r\n K: the number of data clusters\r\n C: the number of feature clusters\r\n n: size of data set\r\n m: number of data features\r\n W: binary data matrix\r\n Is of size n x m, with data in rows and features in columns. \r\n A: data cluster indicator matrix\r\n n x K binary indicator matrix encoding the cluster membership of the data. \r\n Each point can belong to exactly one cluster, so each row consists of zeros except for a single 1. \r\n B: feature cluster indicator matrix. \r\n m x C binary indicator matrix encoding the cluster membership of the features. \r\n Each feature can belong to exactly one cluster, so each row consists of zeros except for a single 1. \r\n X: a K x C matrix that encodes the relationship between data clusters and feature clusters.\r\n\r\n\"\"\"\r\n\r\nITER_MESSAGE = \"Iteration: {0} ............. Cost: {1:.3f}\"\r\n\r\ndef _bd_objective(A,B,W):\r\n \"\"\" Objective function for block diagonal variation of BMD.\"\"\"\r\n return np.linalg.norm(W - np.dot(A, B.T))\r\n\r\n\r\ndef _is_bd_outlier(B):\r\n \"\"\"Determines if a feature is an outlier if it is equally associated \r\n with each cluster. This is checked by seeing if all the entries in a \r\n given row of the candidate feature cluster association matrix are 1's. \r\n Any rows that meet these conditions are set to 0. (see Li and Zhu)\r\n\r\n Parameters\r\n ----------\r\n B : np.array\r\n candidate feature cluster assignment matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n feature cluster assignment matrix\r\n \"\"\"\r\n\r\n i = np.where(np.sum(B, axis=1) == B.shape[1])[0]\r\n B[i, :] = 0\r\n \r\n return B\r\n \r\n\r\ndef _d_ik(i, W, B):\r\n \"\"\" The data cluster matrix A is updated using formula 10 from Li (2005) which is the same as \r\n formula 2.3 in Li & Zhu (2005). The formula uses the squared distance between ith point and \r\n the kth cluster. The point is then assigned to the closest cluster. The squared distance\r\n between point i and data cluster k is computed by summing over the element-wise differences\r\n between the i-th row and k-th row of W and B, respectively: \r\n \r\n d[i,k] = SUM_{j in features} (W[i,j] - B[k,j])^2j\r\n \r\n Parameters\r\n ----------\r\n i : int\r\n infdex of data point\r\n W : np.array\r\n binary data matrix\r\n B : np.array\r\n feature cluster assignment matrix\r\n \r\n Returns\r\n -------\r\n int\r\n index of assigned cluster\r\n \"\"\"\r\n\r\n # Vectorized implementation to compute summations found in formula 10. 
\r\n Di = W[i,:].reshape((W.shape[1],1)) - B # broadcast i-th row of W across columns of B\r\n Di = Di*Di \r\n Di = Di.sum(axis = 0) # sum over rows (features)\r\n assigned_cluster = Di.argmin() # take index of minimum quantity to be new cluster assignment\r\n \r\n return assigned_cluster\r\n \r\n\r\n \r\n#### assign clusters ####\r\n#def ai(B,W,i):\r\n# \r\n# q = B.T - W[i,:]\r\n# q = q*q\r\n# q = q.sum(axis = 1)\r\n# return q.argmin()\r\n\r\n#########################\r\n\r\ndef _bd_updateA(A,B,W):\r\n \"\"\"Update data cluster assignment matrix A using formula 10 in Li (2005). \r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n old data cluster matrix\r\n B : np.array\r\n old feature cluster matrix\r\n W : np.array\r\n binary data matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n updated data cluster matrix\r\n \"\"\"\r\n\r\n n, K = A.shape\r\n A_new = np.zeros((n,K))\r\n \r\n for i in range(n): \r\n A_new[i,:], A_new[i, _d_ik(i, W, B)] = 0, 1\r\n \r\n return A_new\r\n\r\n\r\ndef _Y(A, W):\r\n \"\"\" The feature cluster matrix B is updated using formula 11 from Li (2005). This is done\r\n by computing a 'probability matrix' Y where the kj-th entry represents the probability\r\n feature j is in the k-th cluster. The updated matrix B is the same shape as Y and contains\r\n 1's where the corresponding entry of Y is greater than or equal to 1/2 and 0's elsewhere. \r\n (Note Li (2005) uses a strict inequality, but we have found empirically that nonstrict \r\n inequality works better.) \r\n \r\n The formula for the matrix Y is: \r\n \r\n \r\n y[i,j] = (1/n_k)*SUM_{i in data} a[i,k]*w[i,j] = (1/n_k)*( a[:,k]'w[:,j] )\r\n n_k = number of points in cluster k\r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n data cluster matrix\r\n W : np.array\r\n data matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n probability matrix\r\n \"\"\"\r\n\r\n n_k = A.sum(axis = 0) # Compute number of points in each cluster.\r\n n_k[np.where(n_k == 0)[0]] = np.inf # Set zero entries to inf to zero out reciprocal. \r\n r = 1 / n_k # Compute reciprocal. \r\n r.shape = (A.shape[1],1) # Reshape for broadcasting. \r\n\r\n return np.dot(A.T, W)*r # Compute Y matrix as dot product matrix of rows of A and W. \r\n\r\n\r\ndef _bd_updateB(A,W):\r\n \"\"\" Updated feature cluster matrix B. Applies the _Y() and B set to the matrix the same shape\r\n as Y but with 1's in the entries corresponding to where Y[>=0.5] and 0's elsewhere.\r\n \r\n Features that are associated with all clusters are 'outliers' (have a row whose entries >=0.5 in Y)\r\n Following Li and Zhu are not assigned to any clusters by setting all entries in B associated with those\r\n features to 0. \r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n old data cluster matrix\r\n W : np.array\r\n data matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n new feature cluster matrix\r\n \"\"\"\r\n\r\n \r\n Y = _Y(A, W)\r\n B_new = np.greater_equal(Y, 0.5).T # Update B matrix. 
\r\n \r\n #### setting all True rows to False ####\r\n # if a feature is similarly associated with all clusters, it is an outlier (see Li and Zhu)\r\n # such a feature yields a row of all True from np.greater_equal(); reset it to a row of False\r\n \r\n # # TODO: use single outlier function and create a shared utils.py \r\n # def is_outlier(d):\r\n \r\n # if np.array_equal(d, np.array([True]*len(d))):\r\n # return np.array([False]*len(d))\r\n # else:\r\n # return d\r\n \r\n # B_new = np.apply_along_axis(is_outlier, axis = 1, arr = B_new)\r\n\r\n B_new = _is_bd_outlier(B_new)\r\n \r\n return B_new\r\n \r\n\r\ndef run_bd_BMD(A,W, max_iter=100, verbose=False):\r\n \"\"\"Executes clustering Algorithm 2 from Li (2005). \r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n initial data cluster assignment matrix\r\n W : np.array\r\n binary data matrix\r\n max_iter : int, optional\r\n maximum number of algorithm iterations, by default 100\r\n verbose : bool, optional\r\n print progress and objective function value, by default False\r\n \r\n Returns\r\n -------\r\n float\r\n final value of objective function\r\n np.array\r\n final data cluster matrix\r\n np.array\r\n final feature cluster matrix\r\n \"\"\"\r\n \r\n \r\n B = _bd_updateB(A,W)\r\n O_old = _bd_objective(A, B, W)\r\n O_new = O_old # keep the return value defined even if the loop body never improves\r\n\r\n n_iter = 0\r\n\r\n while n_iter < max_iter:\r\n A = _bd_updateA(A,B,W)\r\n B = _bd_updateB(A,W)\r\n O_new = _bd_objective(A,B,W)\r\n if O_new < O_old:\r\n O_old = O_new\r\n if verbose:\r\n print(ITER_MESSAGE.format(n_iter, O_new))\r\n n_iter += 1\r\n else:\r\n break\r\n\r\n if verbose:\r\n print(\"Convergence reached after {0} iterations\".format(n_iter+1))\r\n \r\n return O_new, A, B" ]
[ [ "numpy.sum", "numpy.dot", "numpy.zeros", "numpy.greater_equal", "numpy.where" ] ]
Zelenyy/phd-code
[ "d5b8bfefd2418a915dde89f7da2cb6683f438556" ]
[ "python/simulation_scripts/grep_log.py" ]
[ "from phd.utils.path_tools import LogTime\nimport numpy as np\n\nlog = LogTime(\".\")\npaths = np.array(log.paths)\nindx = paths.argsort()\ncount = 1\nfor t, p in zip(log.time[indx], paths[indx]):\n if (count % 3 == 0):\n print(p,t)\n count+=1" ]
[ [ "numpy.array" ] ]
binfnstats/eli5
[ "017c738f8dcf3e31346de49a390835ffafad3f1b" ]
[ "eli5/formatters/image.py" ]
[ "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom typing import Union, Optional, Callable\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.cm\n\nfrom eli5.base import Explanation\n\n\ndef format_as_image(expl, # type: Explanation\n resampling_filter=Image.LANCZOS, # type: int\n colormap=matplotlib.cm.viridis, # type: Callable[[np.ndarray], np.ndarray]\n alpha_limit=0.65, # type: Optional[Union[float, int]]\n ):\n # type: (...) -> Image\n \"\"\"format_as_image(expl, resampling_filter=Image.LANCZOS, colormap=matplotlib.cm.viridis, alpha_limit=0.65)\n\n Format a :class:`eli5.base.Explanation` object as an image.\n\n Note that this formatter requires ``matplotlib`` and ``Pillow`` optional dependencies.\n\n\n :param Explanation expl:\n :class:`eli5.base.Explanation` object to be formatted.\n It must have an ``image`` attribute with a Pillow image that will be overlaid.\n It must have a ``targets`` attribute, a list of :class:`eli5.base.TargetExplanation` \\\n instances that contain the attribute ``heatmap``, \\\n a rank 2 numpy array with float values in the interval [0, 1].\n Currently ``targets`` must be length 1 (only one target is supported).\n\n\n :raises TypeError: if ``heatmap`` is not a numpy array.\n :raises ValueError: if ``heatmap`` does not contain values as floats in the interval [0, 1].\n :raises TypeError: if ``image`` is not a Pillow image.\n\n :param resampling_filter:\n Interpolation ID or Pillow filter to use when resizing the image.\n\n Example filters from PIL.Image\n * ``NEAREST``\n * ``BOX``\n * ``BILINEAR``\n * ``HAMMING``\n * ``BICUBIC``\n * ``LANCZOS``\n\n See also `<https://pillow.readthedocs.io/en/stable/handbook/concepts.html#filters>`_.\n\n *Note that these attributes are integer values*.\n\n Default is ``PIL.Image.LANCZOS``.\n :type resampling_filter: int, optional\n\n :param colormap:\n Colormap scheme to be applied when converting the heatmap from grayscale to RGB.\n Either a colormap from matplotlib.cm, \n or a callable that takes a rank 2 array and \n returns the colored heatmap as a [0, 1] RGBA numpy array.\n\n Example colormaps from matplotlib.cm\n * ``viridis``\n * ``jet``\n * ``binary``\n\n See also https://matplotlib.org/gallery/color/colormap_reference.html.\n\n Default is ``matplotlib.cm.viridis`` (green/blue to yellow).\n :type colormap: callable, optional\n\n :param alpha_limit:\n Maximum alpha (transparency / opacity) value allowed \n for the alpha channel pixels in the RGBA heatmap image.\n\n Between 0.0 and 1.0.\n\n Useful when laying the heatmap over the original image, \n so that the image can be seen over the heatmap.\n\n Default is 0.65.\n\n\n :raises ValueError: if ``alpha_limit`` is outside the [0, 1] interval.\n :raises TypeError: if ``alpha_limit`` is not float, int, or None.\n :type alpha_limit: float or int, optional\n\n\n Returns\n -------\n overlay : PIL.Image.Image\n PIL image instance of the heatmap blended over the image.\n \"\"\"\n image = expl.image\n # validate image\n if not isinstance(image, Image.Image):\n raise TypeError('Explanation image must be a PIL.Image.Image instance. '\n 'Got: {}'.format(image))\n if image.mode != 'RGBA':\n # normalize to 'RGBA'\n image = image.convert('RGBA')\n\n if not expl.targets:\n # no heatmaps\n return image\n else:\n assert len(expl.targets) == 1\n heatmap = expl.targets[0].heatmap\n _validate_heatmap(heatmap)\n\n # The order of our operations is: 1. colorize 2. resize\n # as opposed: 1. resize 2. 
colorize\n\n # save the original heatmap values\n heatvals = heatmap\n # apply colours to the grayscale array\n heatmap = _colorize(heatmap, colormap=colormap) # -> rank 3 RGBA array\n\n # make the alpha intensity correspond to the grayscale heatmap values\n # cap the intensity so that it's not too opaque when near maximum value\n _update_alpha(heatmap, starting_array=heatvals, alpha_limit=alpha_limit)\n\n heatmap = expand_heatmap(heatmap, image, resampling_filter=resampling_filter)\n overlay = _overlay_heatmap(heatmap, image)\n return overlay\n\n\ndef heatmap_to_image(heatmap):\n # type: (np.ndarray) -> Image\n \"\"\"\n Convert the numpy array ``heatmap`` to a Pillow image.\n\n Parameters\n ----------\n heatmap : numpy.ndarray\n Rank 2 grayscale ('L') array or rank 3 coloured ('RGB' or RGBA') array,\n with values in interval [0, 1] as floats.\n\n\n :raises TypeError: if ``heatmap`` is not a numpy array.\n :raises ValueError: if ``heatmap`` does not contain values as floats in the interval [0, 1].\n :raises ValueError: if ``heatmap`` rank is neither 2 nor 3.\n :raises ValueError: if rank 3 ``heatmap`` does not have 4 (RGBA) or 3 (RGB) channels.\n\n\n Returns\n -------\n heatmap_image : PIL.Image.Image\n Heatmap as an image with a suitable mode.\n \"\"\"\n _validate_heatmap(heatmap)\n rank = len(heatmap.shape)\n if rank == 2:\n mode = 'L'\n elif rank == 3:\n channels = heatmap.shape[2]\n if channels == 4:\n mode = 'RGBA'\n elif channels == 3:\n mode = 'RGB'\n else:\n raise ValueError('Rank 3 heatmap must have 4 channels (RGBA), '\n 'or 3 channels (RGB). '\n 'Got shape with {} channels'.format(channels))\n else:\n raise ValueError('heatmap must have rank 2 (L, grayscale) ' \n 'or rank 3 (RGBA, colored). '\n 'Got: %d' % rank)\n heatmap = (heatmap*255).astype('uint8') # -> [0, 255] int\n return Image.fromarray(heatmap, mode=mode)\n\n\ndef _validate_heatmap(heatmap):\n \"\"\"Check that ``heatmap`` is a numpy array\n with float values between 0 and 1.\"\"\"\n if not isinstance(heatmap, np.ndarray):\n raise TypeError('heatmap must be a numpy.ndarray instance. '\n 'Got: {}'.format(heatmap))\n mi = np.min(heatmap)\n ma = np.max(heatmap)\n if not (0 <= mi and ma <= 1):\n raise ValueError('heatmap must contain float values '\n 'between 0 and 1 inclusive. 
'\n 'Got array with minimum: {} ' \n 'and maximum: {}'.format(mi, ma))\n\n\ndef _colorize(heatmap, colormap):\n # type: (np.ndarray, Callable[[np.ndarray], np.ndarray]) -> np.ndarray\n \"\"\"\n Apply the ``colormap`` function to a grayscale \n rank 2 ``heatmap`` array (with float values in interval [0, 1]).\n Returns an RGBA rank 3 array with float values in range [0, 1].\n \"\"\"\n heatmap = colormap(heatmap) # -> [0, 1] RGBA ndarray\n return heatmap\n\n\ndef _update_alpha(image_array, starting_array=None, alpha_limit=None):\n # type: (np.ndarray, Optional[np.ndarray], Optional[Union[float, int]]) -> None\n \"\"\"\n Update the alpha channel values of an RGBA rank 3 ndarray ``image_array``,\n optionally creating the alpha channel from rank 2 ``starting_array``, \n and setting upper limit for alpha values (opacity) to ``alpha_limit``.\n\n This function modifies ``image_array`` in-place.\n \"\"\"\n # FIXME: this function may be too specialized and could be refactored\n # get the alpha channel slice\n if isinstance(starting_array, np.ndarray):\n alpha = starting_array\n else:\n # take the alpha channel as is\n alpha = image_array[:,:,3]\n # set maximum alpha value\n alpha = _cap_alpha(alpha, alpha_limit)\n # update alpha channel in the original image\n image_array[:,:,3] = alpha\n\n\ndef _cap_alpha(alpha_arr, alpha_limit):\n # type: (np.ndarray, Union[None, float, int]) -> np.ndarray\n \"\"\"\n Limit the alpha values in ``alpha_arr``\n by setting the maximum alpha value to ``alpha_limit``.\n Returns a a new array with the values capped.\n \"\"\"\n if alpha_limit is None:\n return alpha_arr\n elif isinstance(alpha_limit, (float, int)):\n if 0 <= alpha_limit <= 1:\n new_alpha = np.minimum(alpha_arr, alpha_limit)\n return new_alpha\n else:\n raise ValueError('alpha_limit must be' \n 'between 0 and 1 inclusive, got: %f' % alpha_limit)\n else:\n raise TypeError('alpha_limit must be int or float,' \n 'got: {}'.format(alpha_limit))\n\n\ndef expand_heatmap(heatmap, image, resampling_filter=Image.LANCZOS):\n # type: (np.ndarray, Image, Union[None, int]) -> Image\n \"\"\"\n Resize the ``heatmap`` image array to fit over the original ``image``,\n using the specified ``resampling_filter`` method.\n The heatmap is converted to an image in the process.\n\n Parameters\n ----------\n heatmap : numpy.ndarray\n Heatmap that is to be resized, as an array.\n\n image : PIL.Image.Image\n The image whose dimensions will be resized to.\n\n resampling_filter : int or None\n Interpolation to use when resizing.\n\n See :func:`eli5.format_as_image` for more details on the `resampling_filter` parameter.\n\n\n :raises TypeError: if ``image`` is not a Pillow image instance.\n\n\n Returns\n -------\n resized_heatmap : PIL.Image.Image\n The heatmap, resized, as a PIL image.\n \"\"\"\n if not isinstance(image, Image.Image):\n raise TypeError('image must be a PIL.Image.Image instance. '\n 'Got: {}'.format(image))\n heatmap = heatmap_to_image(heatmap)\n spatial_dimensions = (image.width, image.height)\n heatmap = heatmap.resize(spatial_dimensions, resample=resampling_filter)\n return heatmap\n\n\ndef _overlay_heatmap(heatmap, image):\n # type: (Image, Image) -> Image\n \"\"\"\n Blend (combine) ``heatmap`` over ``image``, \n using alpha channel values appropriately (must have mode `RGBA`).\n Output is 'RGBA'.\n \"\"\"\n # note that the order of alpha_composite arguments matters\n overlayed_image = Image.alpha_composite(image, heatmap)\n return overlayed_image" ]
[ [ "numpy.max", "numpy.min", "numpy.minimum" ] ]
jason-neal/companion_simulations
[ "b5773e5539011d492b7128d0dd2778041ce50d52" ]
[ "bin/coadd_chi2_db.py" ]
[ "#!/usr/bin/env python\n\"\"\"Co-add_chi2_values.py.\n\nCreate Table of minimum Chi_2 values and save to a table.\n\"\"\"\nimport argparse\nimport warnings\nimport glob\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy as sa\n\nimport simulators\n\n\ndef parse_args(args):\n \"\"\"Take care of all the argparse stuff.\n\n :returns: the args\n \"\"\"\n parser = argparse.ArgumentParser(description='Create Co-added Chi-squared db.')\n parser.add_argument('star', help='Star names')\n parser.add_argument(\"obsnum\", help=\"Observation number\")\n parser.add_argument('--suffix', default=\"\",\n help='Suffix to add to the file names.')\n parser.add_argument('-v', '--verbose', action=\"store_true\",\n help='Enable verbose.')\n parser.add_argument('-r', '--replace', action=\"store_true\",\n help='Overwrite the database if already exists.')\n parser.add_argument('-c', '--chunksize', default=1000, type=int,\n help='Chinksize for reading in csv files.')\n parser.add_argument(\"-m\", '--move', action=\"store_true\",\n help='Move original files after joining (default=False).')\n\n return parser.parse_args(args)\n\n\ndef main(star, obsnum, suffix, replace=False, verbose=False, chunksize=1000, move=False):\n \"\"\"\"\"\"\n star = star.upper()\n if suffix is None:\n suffix = \"\"\n\n patterns = [os.path.join(\n simulators.paths[\"output_dir\"], star, \"iam\",\n \"{0}-{1}_{2}_iam_chisqr_results{3}*.csv\".format(star, obsnum, chip, suffix))\n for chip in range(1, 5)]\n print(patterns)\n if (sum(1 for _ in glob.iglob(patterns[0]))) == 0:\n print(\"Patterns were not found\")\n patterns = [os.path.join(\n simulators.paths[\"output_dir\"], star, \"iam\", \"processed_csv\",\n \"{0}-{1}_{2}_iam_chisqr_results{3}*.csv\".format(star, obsnum, chip, suffix))\n for chip in range(1, 5)]\n\n print(\"new Patterns\", patterns)\n if sum(sum(1 for _ in glob.iglob(pattern)) for pattern in patterns) == 0:\n raise ValueError(\"Issue with patterns finding for {0} obs {1}\".format(star, obsnum))\n\n # Start up database\n coadd_database = os.path.join(\n simulators.paths[\"output_dir\"], star, \"iam\",\n \"{0}-{1}_coadd_iam_chisqr_results{2}.db\".format(star, obsnum, suffix))\n\n # print(\"Replace\", replace)\n print(\"os.path.isfile(coadd_database)\", os.path.isfile(coadd_database))\n if os.path.isfile(coadd_database):\n if replace:\n os.remove(coadd_database)\n else:\n raise IOError(\"The database file {0} already exists. Add the switch\"\n \" -r to replace the old database file.\".format(coadd_database))\n\n database_name = 'sqlite:///{0}'.format(coadd_database)\n engine = sa.create_engine(database_name)\n if verbose:\n print(\"csv_database =\", engine, type(engine))\n\n print(\"pattern lengths\", [sum(1 for _ in glob.iglob(pattern)) for pattern in patterns])\n\n # get list of patterns. 
and sort in order for loading in.\n detector_files = [sorted(glob.glob(pattern)) for pattern in patterns]\n\n i, j = 0, 1\n for num, files in enumerate(zip(*detector_files)):\n assert len(files) == 4\n f_0 = files[0]\n\n if \"[\" in f_0:\n n = f_0.split(\"[\")[-1]\n n = n.split(\"]\")[0]\n assert all(n in f for f in files) # All have this same host\n teff, logg, feh = [float(x) for x in n.split(\"_\")]\n if verbose:\n print(\"host params\", teff, logg, feh)\n host_flag = True\n else:\n host_flag = False\n teff, logg, feh = np.nan, np.nan, np.nan\n warnings.warn(\"No host parameter values found in file name.\")\n\n # Initalize iterators:\n iterators = [pd.read_csv(f, iterator=True, chunksize=chunksize) for f in files]\n\n while True:\n try:\n chunks = [pd_iter.get_chunk() for pd_iter in iterators]\n assert all([len(chunks[k]) == len(chunks[l])\n for k, l in ((0, 1), (1, 2), (2, 3))])\n except StopIteration:\n break\n\n joint_12 = pd.merge(chunks[0], chunks[1], how=\"outer\", suffixes=[\"_1\", \"_2\"],\n on=['teff_2', 'logg_2', 'feh_2', 'rv', 'gamma'])\n joint_34 = pd.merge(chunks[2], chunks[3], how=\"outer\", suffixes=[\"_3\", \"_4\"],\n on=['teff_2', 'logg_2', 'feh_2', 'rv', 'gamma'])\n pd_joint = pd.merge(joint_12, joint_34, how=\"outer\",\n on=['teff_2', 'logg_2', 'feh_2', 'rv', 'gamma'])\n\n # co-adding chisquare values across detectors\n pd_joint[\"coadd_chi2\"] = pd_joint[\"chi2_1\"] + pd_joint[\"chi2_2\"] + pd_joint[\"chi2_3\"] + pd_joint[\"chi2_4\"]\n pd_joint[\"coadd_npix\"] = pd_joint[\"npix_1\"] + pd_joint[\"npix_2\"] + pd_joint[\"npix_3\"] + pd_joint[\"npix_4\"]\n\n if pd_joint.isnull().values.any():\n print(pd_joint)\n assert not pd_joint.isnull().values.any(), \"There are nans in the joint DataFrame!!!\"\n\n # Adding host parameters\n pd_joint[\"teff_1\"] = teff\n pd_joint[\"logg_1\"] = logg\n pd_joint[\"feh_1\"] = feh\n pd_joint = pd_joint.rename(columns={c: c.replace(' ', '').lower() for c in pd_joint.columns})\n pd_joint.index += j\n\n i += 1\n pd_joint.to_sql('chi2_table', engine, if_exists='append')\n j = pd_joint.index[-1] + 1\n if verbose:\n print(\"Indicies = \", i, j)\n\n if move:\n for f in files:\n f_split = os.path.split(f) # [\"head\", \"tail\"]\n new_f = os.path.join(f_split[0], \"processed_csv\", f_split[1])\n os.makedirs(os.path.dirname(new_f), exist_ok=True)\n os.rename(f, new_f)\n\n if verbose:\n print(\"Reached end of part =\", num)\n\n if verbose:\n print(\"Completed coadd db creation\")\n\n return None\n\nif __name__ == \"__main__\":\n args = vars(parse_args(sys.argv[1:]))\n\n opts = {k: args[k] for k in args}\n main(**opts)\n print(\"\\nNow use coadd_analysis.py\")\n" ]
[ [ "pandas.read_csv", "pandas.merge" ] ]
baronrustamov/pytext
[ "9790943736e7c0ac53095be2e20177be6fc529a9" ]
[ "pytext/data/masked_util.py" ]
[ "#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Dict, List, Optional, Set\n\nimport numpy as np\nfrom pytext.common.constants import SpecialTokens, Token\nfrom pytext.config.component import Component, ComponentType\nfrom pytext.config.pytext_config import ConfigBase\nfrom pytext.data.data_structures.annotation import Annotation, Intent, Root, Slot\nfrom pytext.data.utils import VocabBuilder, Vocabulary\n\n\nclass MaskedVocabBuilder(VocabBuilder):\n def __init__(self, delimiter=\" \"):\n super().__init__(delimiter)\n self.use_mask = True\n\n\nSPECIAL_TOKENS: Dict[str, Token] = {\n str(SpecialTokens.MASK): SpecialTokens.MASK,\n str(SpecialTokens.BOS): SpecialTokens.BOS,\n str(SpecialTokens.EOS): SpecialTokens.EOS,\n}\n\n\nclass MaskingFunction(Component):\n class Config(ConfigBase):\n pass\n\n __EXPANSIBLE__ = True\n __COMPONENT_TYPE__ = ComponentType.MASKING_FUNCTION\n\n @classmethod\n def from_config(cls, config, use_bos, use_eos):\n return cls(use_bos, use_eos)\n\n def __init__(self, use_bos, use_eos):\n self.use_bos = use_bos\n self.use_eos = use_eos\n\n def should_mask(self, *args, **kwargs) -> bool:\n return True\n\n def gen_masked_source_target(self, tokens, *args, **kwargs):\n raise NotImplementedError()\n\n def _prepare_dec_target(\n self, dec_source: List[int], clean_input_tokens: List[int], vocab: Vocabulary\n ) -> List[int]:\n dec_target = [\n vocab.get_pad_index()\n if dec_source_token != vocab.get_mask_index()\n else dec_real_target_token\n for (dec_source_token, dec_real_target_token) in zip(\n dec_source, clean_input_tokens\n )\n ]\n\n return dec_target\n\n\nclass TreeMask(MaskingFunction):\n class Config(ConfigBase):\n accept_flat_intents_slots: bool = True\n factor: int = 2\n\n @classmethod\n def from_config(cls, config, use_bos, use_eos):\n return cls(config.accept_flat_intents_slots, config.factor, use_bos, use_eos)\n\n def __init__(self, accept_flat_intents_slots, factor, use_bos, use_eos):\n super().__init__(use_bos, use_eos)\n self.accept_flat_intents_slots = accept_flat_intents_slots\n self.factor = factor\n\n def clean_eos_bos(self, tokens):\n start_index, end_index = 0, len(tokens)\n if self.use_bos:\n start_index = 1\n if self.use_eos:\n end_index = -1\n return tokens[start_index:end_index]\n\n def gen_masked_tree(self, node, mask_token, depth=1):\n if self.should_mask(depth):\n actual_str_len = len(node.flat_str().strip().split(\" \"))\n return \" \".join([mask_token for idx in range(actual_str_len)])\n else:\n return_str = \" \"\n if (\n isinstance(node, Intent)\n or isinstance(node, Slot)\n or isinstance(node, Root)\n ):\n return_str += \"[\"\n return_str += node.label\n return_str += \" \"\n for child in node.children:\n return_str += self.gen_masked_tree(child, mask_token, depth + 1)\n return_str += \" \"\n return_str += \"]\"\n else:\n return_str += node.label\n return_str += \" \"\n return return_str.strip()\n\n def should_mask(self, depth=1):\n return np.random.random() < 1.0 / (self.factor ** depth)\n\n def gen_masked_source_target(self, tokens: List[int], vocab: Vocabulary):\n cleaned_tokens = self.clean_eos_bos(tokens)\n original_target_string = \" \".join(\n [vocab[idx] for idx in cleaned_tokens]\n ).upper()\n try:\n annotation = Annotation(\n original_target_string,\n accept_flat_intents_slots=self.accept_flat_intents_slots,\n )\n except Exception as e:\n # This should never happen other than when testing\n print(e, original_target_string)\n dec_source = [vocab.idx[vocab.mask_token] 
for _ in range(len(tokens))]\n dec_target = [vocab.idx[vocab.pad_token] for _ in range(len(tokens))]\n return dec_source, dec_target\n assert len(annotation.root.children) == 1\n mask_tree_str = self.gen_masked_tree(\n annotation.root.children[0], vocab.mask_token\n )\n\n # We are calling the .split() instead of the tokenize() of tensorizer\n # because the input str contains special MASK token __MASK__\n # It we call tokenize() on this input_str, it may lower __MASK__ or split\n # in unexpected ways causing issues.\n # Hence temporary workaround is that we call split(\" \") and lower all tokens\n # other than MASK tokens\n\n # handle special tokens in vocab\n mask_tree_str: List[str] = list(\n map(\n lambda token: SPECIAL_TOKENS.get(token, token.lower()),\n mask_tree_str.split(\" \"),\n )\n )\n\n dec_source = [vocab.idx.get(t) for t in mask_tree_str]\n\n dec_target = self._prepare_dec_target(dec_source, cleaned_tokens, vocab)\n\n if self.use_bos:\n if self.should_mask():\n dec_source.insert(0, vocab.get_mask_index())\n dec_target.insert(0, vocab.get_bos_index())\n else:\n dec_source.insert(0, vocab.get_bos_index())\n dec_target.insert(0, vocab.get_pad_index())\n\n if self.use_eos:\n if self.should_mask():\n dec_source.append(vocab.get_mask_index())\n dec_target.append(vocab.get_eos_index())\n else:\n dec_source.append(vocab.get_eos_index())\n dec_target.append(vocab.get_pad_index())\n return dec_source, dec_target\n\n\nclass MaskEverything(MaskingFunction):\n def gen_masked_tree(self, node, mask_token, depth=1):\n actual_str_len = len(node.flat_str().strip().split(\" \"))\n return \" \".join([mask_token for idx in range(actual_str_len)])\n\n def gen_masked_source_target(self, tokens, vocab: Vocabulary):\n dec_source: List[int] = [vocab.get_mask_index() for idx in tokens]\n dec_target = self._prepare_dec_target(dec_source, tokens, vocab)\n return dec_source, dec_target\n\n\nclass RandomizedMaskingFunction(MaskingFunction):\n class Config(MaskingFunction.Config):\n seed: Optional[int] = None\n minimum_masks: int = 1\n\n @classmethod\n def from_config(cls, config: Config, use_bos: bool, use_eos: bool):\n return cls(config.seed, config.minimum_masks, use_bos, use_eos)\n\n def __init__(\n self, seed: Optional[int], minimum_masks: int, use_bos: bool, use_eos: bool\n ):\n super().__init__(use_bos, use_eos)\n self.random = np.random.RandomState(seed)\n self.minimum_masks = minimum_masks\n\n def gen_masked_source_target(self, tokens: List[int], vocab: Vocabulary):\n num_masks = self.random.randint(self.minimum_masks, len(tokens))\n\n ind: Set[int] = set(\n self.random.choice(len(tokens), size=num_masks, replace=False)\n )\n\n dec_source: List[int] = [\n vocab.get_mask_index() if idx in ind else token\n for idx, token in enumerate(tokens)\n ]\n\n dec_target = self._prepare_dec_target(dec_source, tokens, vocab)\n\n return dec_source, dec_target\n\n\nclass NoOpMaskingFunction(MaskingFunction):\n class Config(MaskingFunction.Config):\n seed: Optional[int] = None\n minimum_masks: int = 1\n\n @classmethod\n def from_config(cls, config: Config, use_bos: bool, use_eos: bool):\n return cls(config.seed, config.minimum_masks, use_bos, use_eos)\n\n def __init__(\n self, seed: Optional[int], minimum_masks: int, use_bos: bool, use_eos: bool\n ):\n super().__init__(use_bos, use_eos)\n self.random = np.random.RandomState(seed)\n self.minimum_masks = minimum_masks\n\n def gen_masked_source_target(self, tokens: List[int], vocab: Vocabulary):\n dec_target = self._prepare_dec_target(tokens, tokens, vocab)\n\n return 
tokens, dec_target\n" ]
[ [ "numpy.random.RandomState", "numpy.random.random" ] ]
XavierValParejo/SeedBot
[ "7b338184ac9137027c726c43b481c2f79ad12b51" ]
[ "Code/Mapping/ultrasound_mapping.py" ]
[ "import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport lidar_to_grid_map as lg\nfrom grid_mapping_for_a_star import OccupancyGridMap\nfrom a_star_for_ogm_testing import a_star\n\nf = \"mesures.txt\"\n\ndef read_measures(file):\n measures = [line.split(\",\") for line in open(file)]\n angles = []\n distances = []\n for measure in measures:\n angles.append(float(measure[0]))\n distances.append(float(measure[1]))\n ang = np.array(angles)\n dist = np.array(distances)\n return dist,ang\n\ndef map_surroundings(dist):\n xyreso = 0.02 # x-y grid resolution\n yawreso = math.radians(3.1) # yaw angle resolution [rad]\n ox = np.sin(ang) * dist\n oy = np.cos(ang) * dist\n pmap, minx, maxx, miny, maxy, xyreso = lg.generate_ray_casting_grid_map(ox, oy, xyreso, False)\n xyres = np.array(pmap).shape\n return pmap\n\ndef input_points(pmap):\n for x in dist:\n x = x / 10\n ogm = OccupancyGridMap(pmap, 1)\n path, path_idx = a_star((25,30), (50,40),ogm)\n xPath, yPath = zip(*path)\n return ogm\n\ndist, ang = read_measures(f)\nmapp = map_surroundings(dist)\ntipus_=input_points(mapp)" ]
[ [ "numpy.array", "numpy.sin", "numpy.cos" ] ]
vivid-k/code
[ "c39d5a0ba219b499e595812a31362a8f2535859e" ]
[ "AREL-data-process/Data_process.py" ]
[ "import json\nimport os.path as osp\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\nfrom collections import Counter\nimport numpy\nimport h5py\nimport os\n\n\n\"\"\"\n处理文本数据,提取出story,并构建词表\n\"\"\"\nbase_path = \"AREL-data-process/\"\ntrain_data = json.load(open(osp.join(base_path, \"test.story-in-sequence.json\")))\n# train_data = None\nval_data = json.load(open(osp.join(base_path, \"val.story-in-sequence.json\")))\ntest_data = json.load(open(osp.join(base_path, \"test.story-in-sequence.json\")))\n\n### 处理图像数据\nprefix = [\"train\", \"val\", \"test\"]\nwhole_album2im = {}\nfor i, data in enumerate([train_data, val_data, test_data]):\n album2im = {} # 按照album存储图像数据,键为album_id,值为img_id,1-多\n for im in data['images']: # 遍历每一张图像\n if im['id'] == '210929621':\n print(im)\n if im['album_id'] not in album2im: # 以album区分,若album_id并未存储,则为新的album\n album2im[im['album_id']] = [im['id']]\n else: # 该album已存在,则append,注意数据已经按照时间排序\n if im['id'] not in album2im[im['album_id']]:\n album2im[im['album_id']].append(im['id'])\n whole_album2im[prefix[i]] = album2im\n\nfor i, data in enumerate([train_data, val_data, test_data]):\n a = [] # 按照album存储图像数据,键为album_id,值为img_id,1-多\n for im in data['images']: # 遍历每一张图像\n if im['id'] not in a: # 以album区分,若album_id并未存储,则为新的album\n a.append(im['id'])\n print(len(a))\n\n### 处理文本数据\nwhole_album = {}\nstory_lines = {} # 存储每个故事,每个故事五句话,index为0、5、10\nwhole_lines = {} # 存储每个故事,一行存储,index为0、1、2\nstory_line_count = 0 # 句子数量\nwhole_line_count = 0 # story数量\nfor i, data in enumerate([train_data, val_data, test_data]):\n album_mapping = {} # 存储story\n for annot_new in data[\"annotations\"]: # 遍历每组数据\n annot = annot_new[0] # album_id\n assert len(annot_new) == 1\n text = bytes.decode(annot['text'].encode('utf8')) # 字段中包含origin_text和text,前者为原始文本,后者为匿名文本\n if annot['story_id'] not in album_mapping: # story_id为这段描述的id(5张图片)\n album_mapping[annot['story_id']] = {\"text_index\": [story_line_count], \"flickr_id\": [annot['photo_flickr_id']], \"length\": 1, \n \"album_id\": annot['album_id'], \"album_flickr_id\": whole_album2im[prefix[i]][annot['album_id']],\n \"whole_text_index\": whole_line_count, \"origin_text\": text} # story_line_count表示一个句子的id,flickr_id为图片id,album_id,album_flickr_id对应album对应的图像列表,whole_text_index<story_line_count即为story数量\n story_lines[annot['story_id']] = [{\"index\": story_line_count, \"text\": text.split()}]\n whole_lines[annot['story_id']] = {\"index\": whole_line_count, \"text\": text.split()}\n whole_line_count +=1\n else:\n album_mapping[annot['story_id']][\"text_index\"].append(story_line_count)\n album_mapping[annot['story_id']][\"flickr_id\"].append(annot['photo_flickr_id'])\n album_mapping[annot['story_id']][\"length\"] += 1 # length计算当前story长度,1-5\n story_lines[annot['story_id']].append({\"index\": story_line_count, \"text\": text.split()}) \n whole_lines[annot['story_id']][\"text\"].extend(text.split())\n album_mapping[annot['story_id']][\"origin_text\"] += \" \" + text\n story_line_count += 1\n whole_album[prefix[i]] = album_mapping\n\nnew_story_lines = [] \nfor l in story_lines.values():\n for li in l:\n new_story_lines.append(li)\nstory_lines = new_story_lines\nwhole_lines = whole_lines.values()\n\nstory_lines = [r['text'] for r in sorted(story_lines, key=lambda thing: thing['index'])] # 一个句子存储一行\nwhole_lines = [r['text'] for r in sorted(whole_lines, key=lambda thing: thing['index'])] # 一个故事存储一行\n\nprint(len(story_lines))\nprint(len(whole_lines))\n\n\ncnt = Counter() # 可以进行计数,词表构建(词:出现次数)\nfor l in story_lines:\n words = l\n 
for w in words:\n cnt[w] += 1\nwords2id = {}\nidx = 2\n## build the vocabulary\nfor k, v in cnt.most_common():\n if v > 5:\n words2id[k] = idx\n idx += 1\nwords2id[\"<EOS>\"] = 0\nwords2id[\"<UNK>\"] = 1\nid2words = {v:k for k,v in words2id.items()}\nprint(len(id2words))\n\nwhole_album[\"words2id\"] = words2id\nwhole_album[\"id2words\"] = {v:k for k,v in words2id.items()}\n\n# convert the text to ids\nid_story_lines = []\nfor l in story_lines:\n s = [words2id[w] if w in words2id else 1 for w in l]\n id_story_lines.append(s)\n\nid_whole_lines = []\nfor l in whole_lines:\n s = [words2id[w] if w in words2id else 1 for w in l]\n id_whole_lines.append(s)\n\n# pad whole stories with 0 to length 105\nnew_id_whole_lines = []\nspecify_longest = 105\nfor i in range(len(id_whole_lines)):\n cur_len = len(id_whole_lines[i])\n if cur_len < specify_longest:\n new_id_whole_lines.append(id_whole_lines[i] + [0] * (specify_longest - cur_len))\n else:\n new_id_whole_lines.append(id_whole_lines[i][:specify_longest-1] + [0])\n# shape(50200,105)\ndata = numpy.asarray(new_id_whole_lines)\n\n# f = h5py.File(\"full_story.h5\", \"w\")\n# f.create_dataset(\"story\", data=data)\n# f.close()\n## pad each individual sentence to length 30\nnew_id_story_lines = []\nspecify_longest = 30\nfor i in range(len(id_story_lines)):\n cur_len = len(id_story_lines[i])\n if cur_len < specify_longest:\n new_id_story_lines.append(id_story_lines[i] + [0] * (specify_longest - cur_len))\n else:\n new_id_story_lines.append(id_story_lines[i][:specify_longest-1] + [0])\n## (25100,30)\ndata = numpy.asarray(new_id_story_lines, \"int32\")\n\n# f = h5py.File(\"story.h5\", \"w\")\n# f.create_dataset(\"story\", data=data)\n# f.close()\n\n# # remove stories with fewer than 5 images on disk\n# for p in prefix:\n# path = \"/mnt/sshd/wenhuchen/VIST/images_256/{}/\".format(p)\n# deletables = []\n# for story_id, story in whole_album[p].items():\n# d = [osp.exists(osp.join(path, \"{}.jpg\".format(_))) for _ in story[\"flickr_id\"]]\n# if sum(d) < 5:\n# print(\"deleting {}\".format(story_id))\n# deletables.append(story_id)\n# else:\n# pass\n# for i in deletables:\n# del whole_album[p][i]\n\n# build the image-to-story mapping\nflickr_story_map = {}\nfor pre in prefix:\n album = whole_album[pre]\n for k, v in album.items():\n indexes = v['text_index']\n for i, flickr_id in enumerate(v['flickr_id']):\n if flickr_id not in flickr_story_map:\n flickr_story_map[flickr_id] = [indexes[i]]\n else:\n flickr_story_map[flickr_id].append(indexes[i])\n\n# plot the story length distribution\n# length_distribution = [len(s) for s in whole_lines]\n# result = plt.hist(length_distribution, bins='auto', cumulative=True, normed=1)\n# plt.show()\n# length_distribution = [len(s) for s in story_lines]\n# result = plt.hist(length_distribution, bins='auto', cumulative=True, normed=1)\n# plt.hist(length_distribution, bins='auto')\n# plt.show()\n\n\n\"\"\"\nProcess the text data and extract the captions.\n\"\"\"\nbase_path = \"AREL-data-process/dii/\"\ntrain_data = json.load(open(osp.join(base_path, \"train.description-in-isolation.json\")))\nval_data = json.load(open(osp.join(base_path, \"val.description-in-isolation.json\")))\ntest_data = json.load(open(osp.join(base_path, \"test.description-in-isolation.json\")))\n\nmapping = {}\nmapping_original = {}\ntext_list = []\ntext_list_count = 0\nunknown_words = 0\ntotal_words = 0\nwith_story = 0\nno_story = 0\nfor i, data in enumerate([train_data, val_data, test_data]):\n mapping[prefix[i]] = {}\n mapping_original[prefix[i]] = {}\n for l in data['annotations']:\n if l[0]['photo_flickr_id'] not in mapping[prefix[i]]:\n if l[0]['photo_flickr_id'] in flickr_story_map:\n stories = 
flickr_story_map[l[0]['photo_flickr_id']]\n else:\n stories = [-1]\n mapping[prefix[i]][l[0]['photo_flickr_id']] = {'caption': [text_list_count], 'story': stories}\n mapping_original[prefix[i]][l[0]['photo_flickr_id']] = [l[0]['text']]\n else:\n mapping[prefix[i]][l[0]['photo_flickr_id']]['caption'].append(text_list_count)\n mapping_original[prefix[i]][l[0]['photo_flickr_id']].append(l[0]['text'])\n text_list_count += 1\n assert len(l) == 1\n s = []\n for w in l[0]['text'].split(\" \"):\n if w in words2id:\n s.append(words2id[w]) \n else:\n s.append(1)\n unknown_words += 1\n total_words += 1\n text_list.append(s)\nfor pre in prefix:\n count = 0\n for i in mapping[pre]:\n value = mapping[pre][i]\n if len(value['caption']) == 0:\n count += 1\n print(count)\n\nprint(\"unknown words percent is {}\".format(unknown_words / (total_words + 0.0)))\nnew_text_list = []\nspecify_longest = 20\nfor i in range(len(text_list)):\n cur_len = len(text_list[i])\n if cur_len < specify_longest:\n new_text_list.append(text_list[i] + [0] * (specify_longest - cur_len))\n else:\n new_text_list.append(text_list[i][:specify_longest - 1] + [0]) \n\n# for p in prefix:\n# path = \"/mnt/sshd/wenhuchen/VIST/images_256/{}/\".format(p)\n# deletables = []\n# for flickr_id, story in mapping[p].items():\n# if not osp.exists(osp.join(path, \"{}.jpg\".format(flickr_id))):\n# deletables.append(flickr_id)\n# for i in deletables:\n# del mapping[p][i]\n# del mapping_original[p][i]\n \nwhole_album[\"image2caption\"] = mapping\nwhole_album[\"image2caption_original\"] = mapping_original\n\n# with open(\"story_line.json\", 'w') as f:\n# json.dump(whole_album, f)\n\ntext_array = numpy.asarray(new_text_list, dtype='int32')\n\n# f = h5py.File(\"description.h5\", 'w')\n# f.create_dataset(\"story\", data=text_array)\n# f.close()\n\nval_data = json.load(open(osp.join(base_path, \"val.description-in-isolation.json\")))\nwith open(\"val_desc_reference\", \"w\") as f:\n for l in val_data['annotations']:\n # print >> f, \"{}\\t{}\".format(l[0]['photo_flickr_id'], l[0]['text'])\n print(l[0]['photo_flickr_id'], l[0]['text'])\n\nf = h5py.File(\"full_story.h5\", \"r\")\nprint(f['story'][0])\n\nf = h5py.File(\"story.h5\", \"r\")\nprint(f['story'].shape)\n\nf = open(\"story_line.json\", 'r')\ndata = json.load(f)\nprint(len(data['id2words']))\n\n# zero_fc = numpy.zeros((2048, ), \"float32\")\n# zero_conv = numpy.zeros((2048, 7, 7), \"float32\")\n\n# train_fc_base = \"/mnt/sshd/xwang/VIST/feature/train/fc\"\n# train_conv_base = \"/mnt/sshd/xwang/VIST/feature/train/conv\"\n# train_name1 = [l.split(\".\")[0] for l in os.listdir(train_fc_base)]\n\n# train_image_base = \"/mnt/sshd/wenhuchen/VIST/images/train\"\n# train_name2 = [l.split(\".\")[0] for l in os.listdir(train_image_base)]\n\n# rest = set(train_name2) - set(train_name1)\n# for image in rest:\n# numpy.save(os.path.join(train_fc_base, \"{}.npy\".format(image)), zero_fc) \n# numpy.save(os.path.join(train_conv_base, \"{}.npy\".format(image)), zero_conv) \n\n# val_fc_base = \"/mnt/sshd/xwang/VIST/feature/val/fc\"\n# val_conv_base = \"/mnt/sshd/xwang/VIST/feature/val/conv\"\n# val_name1 = [l.split(\".\")[0] for l in os.listdir(val_fc_base)]\n\n# val_image_base = \"/mnt/sshd/wenhuchen/VIST/images/val\"\n# val_name2 = [l.split(\".\")[0] for l in os.listdir(val_image_base)]\n\n# rest = set(val_name2) - set(val_name1)\n# for image in rest:\n# numpy.save(os.path.join(val_fc_base, \"{}.npy\".format(image)), zero_fc) \n# numpy.save(os.path.join(val_conv_base, \"{}.npy\".format(image)), zero_conv) \n\n# 
test_fc_base = \"/mnt/sshd/xwang/VIST/feature/test/fc\"\n# test_conv_base = \"/mnt/sshd/xwang/VIST/feature/test/conv\"\n# test_name1 = [l.split(\".\")[0] for l in os.listdir(test_fc_base)]\n\n# test_image_base = \"/mnt/sshd/wenhuchen/VIST/images/test\"\n# test_name2 = [l.split(\".\")[0] for l in os.listdir(test_image_base)]\n\n# rest = set(test_name2) - set(test_name1)\n# for image in rest:\n# numpy.save(os.path.join(test_fc_base, \"{}.npy\".format(image)), zero_fc) \n# numpy.save(os.path.join(test_conv_base, \"{}.npy\".format(image)), zero_conv) \n\n# with open(\"story_line.json\", 'r') as f: \n# data = json.load(f)\n\n# print(len(data['image2caption']['train']))\n# print(len(data['train']))" ]
[ [ "numpy.asarray" ] ]
neuralmagic/yolact
[ "68ea8f6edcc0d61047a95071fa22d8d271164605" ]
[ "layers/box_utils.py" ]
[ "# -*- coding: utf-8 -*-\nimport torch\nfrom utils import timer\n\nfrom data import cfg\n\[email protected]\ndef point_form(boxes):\n \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n representation for comparison to point form ground truth data.\n Args:\n boxes: (tensor) center-size default boxes from priorbox layers.\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax\n\n\[email protected]\ndef center_size(boxes):\n \"\"\" Convert prior_boxes to (cx, cy, w, h)\n representation for comparison to center-size form ground truth data.\n Args:\n boxes: (tensor) point_form boxes\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat(( (boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2] ), 1) # w, h\n\[email protected]\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [n,A,4].\n box_b: (tensor) bounding boxes, Shape: [n,B,4].\n Return:\n (tensor) intersection area, Shape: [n,A,B].\n \"\"\"\n n = box_a.size(0)\n A = box_a.size(1)\n B = box_b.size(1)\n max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),\n box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))\n min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),\n box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))\n return torch.clamp(max_xy - min_xy, min=0).prod(3) # inter\n\n\ndef jaccard(box_a, box_b, iscrowd:bool=False):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. Here we operate on\n ground truth boxes and default boxes. If iscrowd=True, put the crowd in box_b.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n use_batch = True\n if box_a.dim() == 2:\n use_batch = False\n box_a = box_a[None, ...]\n box_b = box_b[None, ...]\n\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) *\n (box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter) # [A,B]\n area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) *\n (box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n\n out = inter / area_a if iscrowd else inter / union\n return out if use_batch else out.squeeze(0)\n\ndef elemwise_box_iou(box_a, box_b):\n \"\"\" Does the same as above but instead of pairwise, elementwise along the inner dimension. 
\"\"\"\n max_xy = torch.min(box_a[:, 2:], box_b[:, 2:])\n min_xy = torch.max(box_a[:, :2], box_b[:, :2])\n inter = torch.clamp((max_xy - min_xy), min=0)\n inter = inter[:, 0] * inter[:, 1]\n\n area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])\n area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])\n\n union = area_a + area_b - inter\n union = torch.clamp(union, min=0.1)\n\n # Return value is [n] for inputs [n, 4]\n return torch.clamp(inter / union, max=1)\n\ndef mask_iou(masks_a, masks_b, iscrowd=False, device='cpu'):\n \"\"\"\n Computes the pariwise mask IoU between two sets of masks of size [a, h, w] and [b, h, w].\n The output is of size [a, b].\n\n Wait I thought this was \"box_utils\", why am I putting this in here?\n \"\"\"\n\n masks_a = masks_a.view(masks_a.size(0), -1)\n masks_b = masks_b.view(masks_b.size(0), -1)\n masks_a = masks_a.to(device)\n masks_b = masks_b.to(device)\n intersection = masks_a.to(device) @ masks_b.t().to(device)\n area_a = masks_a.sum(dim=1).unsqueeze(1)\n area_b = masks_b.sum(dim=1).unsqueeze(0)\n\n return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a\n\ndef elemwise_mask_iou(masks_a, masks_b):\n \"\"\" Does the same as above but instead of pairwise, elementwise along the outer dimension. \"\"\"\n masks_a = masks_a.view(-1, masks_a.size(-1))\n masks_b = masks_b.view(-1, masks_b.size(-1))\n\n intersection = (masks_a * masks_b).sum(dim=0)\n area_a = masks_a.sum(dim=0)\n area_b = masks_b.sum(dim=0)\n\n # Return value is [n] for inputs [h, w, n]\n return torch.clamp(intersection / torch.clamp(area_a + area_b - intersection, min=0.1), max=1)\n\n\n\ndef change(gt, priors):\n \"\"\"\n Compute the d_change metric proposed in Box2Pix:\n https://lmb.informatik.uni-freiburg.de/Publications/2018/UB18/paper-box2pix.pdf\n \n Input should be in point form (xmin, ymin, xmax, ymax).\n\n Output is of shape [num_gt, num_priors]\n Note this returns -change so it can be a drop in replacement for \n \"\"\"\n num_priors = priors.size(0)\n num_gt = gt.size(0)\n\n gt_w = (gt[:, 2] - gt[:, 0])[:, None].expand(num_gt, num_priors)\n gt_h = (gt[:, 3] - gt[:, 1])[:, None].expand(num_gt, num_priors)\n\n gt_mat = gt[:, None, :].expand(num_gt, num_priors, 4)\n pr_mat = priors[None, :, :].expand(num_gt, num_priors, 4)\n\n diff = gt_mat - pr_mat\n diff[:, :, 0] /= gt_w\n diff[:, :, 2] /= gt_w\n diff[:, :, 1] /= gt_h\n diff[:, :, 3] /= gt_h\n\n return -torch.sqrt( (diff ** 2).sum(dim=2) )\n\n\n\n\ndef match(pos_thresh, neg_thresh, truths, priors, labels, crowd_boxes, loc_t, conf_t, idx_t, idx, loc_data):\n \"\"\"Match each prior box with the ground truth box of the highest jaccard\n overlap, encode the bounding boxes, then return the matched indices\n corresponding to both confidence and location preds.\n Args:\n pos_thresh: (float) IoU > pos_thresh ==> positive.\n neg_thresh: (float) IoU < neg_thresh ==> negative.\n truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n crowd_boxes: (tensor) All the crowd box annotations or None if there are none.\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. 
Note: -1 means neutral.\n idx_t: (tensor) Tensor to be filled w/ the index of the matched gt box for each prior.\n idx: (int) current batch index.\n loc_data: (tensor) The predicted bbox regression coordinates for this batch.\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n decoded_priors = decode(loc_data, priors, cfg.use_yolo_regressors) if cfg.use_prediction_matching else point_form(priors)\n \n # Size [num_objects, num_priors]\n overlaps = jaccard(truths, decoded_priors) if not cfg.use_change_matching else change(truths, decoded_priors)\n\n # Size [num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0)\n\n # We want to ensure that each gt gets used at least once so that we don't\n # waste any training data. In order to do that, find the max overlap anchor\n # with each gt, and force that anchor to use that gt.\n for _ in range(overlaps.size(0)):\n # Find j, the gt with the highest overlap with a prior\n # In effect, this will loop through overlaps.size(0) in a \"smart\" order,\n # always choosing the highest overlap first.\n best_prior_overlap, best_prior_idx = overlaps.max(1)\n j = best_prior_overlap.max(0)[1]\n\n # Find i, the highest overlap anchor with this gt\n i = best_prior_idx[j]\n\n # Set all other overlaps with i to be -1 so that no other gt uses it\n overlaps[:, i] = -1\n # Set all other overlaps with j to be -1 so that this loop never uses j again\n overlaps[j, :] = -1\n\n # Overwrite i's score to be 2 so it doesn't get thresholded ever\n best_truth_overlap[i] = 2\n # Set the gt to be used for i to be j, overwriting whatever was there\n best_truth_idx[i] = j\n\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n\n conf[best_truth_overlap < pos_thresh] = -1 # label as neutral\n conf[best_truth_overlap < neg_thresh] = 0 # label as background\n\n # Deal with crowd annotations for COCO\n if crowd_boxes is not None and cfg.crowd_iou_threshold < 1:\n # Size [num_priors, num_crowds]\n crowd_overlaps = jaccard(decoded_priors, crowd_boxes, iscrowd=True)\n # Size [num_priors]\n best_crowd_overlap, best_crowd_idx = crowd_overlaps.max(1)\n # Set non-positives with crowd iou of over the threshold to be neutral.\n conf[(conf <= 0) & (best_crowd_overlap > cfg.crowd_iou_threshold)] = -1\n\n loc = encode(matches, priors, cfg.use_yolo_regressors)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n conf_t[idx] = conf # [num_priors] top class label for each prior\n idx_t[idx] = best_truth_idx # [num_priors] indices for lookup\n\[email protected]\ndef encode(matched, priors, use_yolo_regressors:bool=False):\n \"\"\"\n Encode bboxes matched with each prior into the format\n produced by the network. See decode for more details on\n this format. Note that encode(decode(x, p), p) = x.\n \n Args:\n - matched: A tensor of bboxes in point form with shape [num_priors, 4]\n - priors: The tensor of all priors with shape [num_priors, 4]\n Return: A tensor with encoded relative coordinates in the format\n outputted by the network (see decode). 
Size: [num_priors, 4]\n \"\"\"\n\n if use_yolo_regressors:\n # Exactly the reverse of what we did in decode\n # In fact encode(decode(x, p), p) should be x\n boxes = center_size(matched)\n\n loc = torch.cat((\n boxes[:, :2] - priors[:, :2],\n torch.log(boxes[:, 2:] / priors[:, 2:])\n ), 1)\n else:\n variances = [0.1, 0.2]\n\n # dist b/t match center and prior's center\n g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\n # encode variance\n g_cxcy /= (variances[0] * priors[:, 2:])\n # match wh / prior wh\n g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n g_wh = torch.log(g_wh) / variances[1]\n # return target for smooth_l1_loss\n loc = torch.cat([g_cxcy, g_wh], 1) # [num_priors,4]\n \n return loc\n\[email protected]\ndef decode(loc, priors, use_yolo_regressors:bool=False):\n \"\"\"\n Decode predicted bbox coordinates using the same scheme\n employed by Yolov2: https://arxiv.org/pdf/1612.08242.pdf\n\n b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x\n b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y\n b_w = prior_w * exp(loc_w)\n b_h = prior_h * exp(loc_h)\n \n Note that loc is inputed as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h]\n while priors are inputed as [x, y, w, h] where each coordinate\n is relative to size of the image (even sigmoid(x)). We do this\n in the network by dividing by the 'cell size', which is just\n the size of the convouts.\n \n Also note that prior_x and prior_y are center coordinates which\n is why we have to subtract .5 from sigmoid(pred_x and pred_y).\n \n Args:\n - loc: The predicted bounding boxes of size [num_priors, 4]\n - priors: The priorbox coords with size [num_priors, 4]\n \n Returns: A tensor of decoded relative coordinates in point form \n form with size [num_priors, 4]\n \"\"\"\n\n if use_yolo_regressors:\n # Decoded boxes in center-size notation\n boxes = torch.cat((\n loc[:, :2] + priors[:, :2],\n priors[:, 2:] * torch.exp(loc[:, 2:])\n ), 1)\n\n boxes = point_form(boxes)\n else:\n variances = [0.1, 0.2]\n \n boxes = torch.cat((\n priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n boxes[:, :2] -= boxes[:, 2:] / 2\n boxes[:, 2:] += boxes[:, :2]\n \n return boxes\n\n\n\ndef log_sum_exp(x):\n \"\"\"Utility function for computing log_sum_exp while determining\n This will be used to determine unaveraged confidence loss across\n all examples in a batch.\n Args:\n x (Variable(tensor)): conf_preds from conf layers\n \"\"\"\n x_max = x.data.max()\n return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max\n\n\[email protected]\ndef sanitize_coordinates(_x1, _x2, img_size:int, padding:int=0, cast:bool=True):\n \"\"\"\n Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size.\n Also converts from relative to absolute coordinates and casts the results to long tensors.\n\n If cast is false, the result won't be cast to longs.\n Warning: this does things in-place behind the scenes so copy if necessary.\n \"\"\"\n _x1 = _x1 * img_size\n _x2 = _x2 * img_size\n if cast:\n _x1 = _x1.long()\n _x2 = _x2.long()\n x1 = torch.min(_x1, _x2)\n x2 = torch.max(_x1, _x2)\n x1 = torch.clamp(x1-padding, min=0)\n x2 = torch.clamp(x2+padding, max=img_size)\n\n return x1, x2\n\n\[email protected]\ndef crop(masks, boxes, padding:int=1):\n \"\"\"\n \"Crop\" predicted masks by zeroing out everything not in the predicted bbox.\n Vectorized by Chong (thanks Chong).\n\n Args:\n - masks should be a size [h, w, n] tensor of masks\n - boxes should be a size [n, 4] tensor 
of bbox coords in relative point form\n \"\"\"\n h, w, n = masks.size()\n x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding, cast=False)\n\n rows = torch.arange(w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n)\n cols = torch.arange(h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n)\n \n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_up = cols >= y1.view(1, 1, -1)\n masks_down = cols < y2.view(1, 1, -1)\n \n crop_mask = masks_left * masks_right * masks_up * masks_down\n \n return masks * crop_mask.float()\n\n\ndef index2d(src, idx):\n \"\"\"\n Indexes a tensor by a 2d index.\n\n In effect, this does\n out[i, j] = src[i, idx[i, j]]\n \n Both src and idx should have the same size.\n \"\"\"\n\n offs = torch.arange(idx.size(0), device=idx.device)[:, None].expand_as(idx)\n idx = idx + offs * idx.size(1)\n\n return src.view(-1)[idx.view(-1)].view(idx.size())\n" ]
[ [ "torch.min", "torch.exp", "torch.log", "torch.arange", "torch.max", "torch.cat", "torch.clamp" ] ]
dimitrymindlin/DenseNetMuraPytorch
[ "ef3a872d739b015e3618c00265acb481dc251342" ]
[ "pipeline.py" ]
[ "import os\nimport pandas as pd\nfrom tqdm import tqdm\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.datasets.folder import pil_loader\n\nfrom configs.mura_config import mura_config\n\ndata_cat = ['train', 'valid'] # data categories\n\ndef get_study_level_data(study_types):\n \"\"\"\n Returns a dict, with keys 'train' and 'valid' and respective values as study level dataframes, \n these dataframes contain three columns 'Path', 'Count', 'Label'\n Args:\n study_type (list): one or many of the seven study type folder names in 'train/valid/test' dataset\n \"\"\"\n study_data = {}\n study_label = {mura_config['data']['class_names'][1]: 1, mura_config['data']['class_names'][0]: 0}\n for phase in data_cat:\n for study_type in study_types:\n BASE_DIR = 'mura/%s/%s/' % (phase, study_type)\n patients = list(os.walk(BASE_DIR))[0][1] # list of patient folder names\n study_data[phase] = pd.DataFrame(columns=['Path', 'Count', 'Label'])\n i = 0\n for patient in tqdm(patients): # for each patient folder\n for study in os.listdir(BASE_DIR + patient): # for each study in that patient folder\n label = study_label[study.split('_')[1]] # get label 0 or 1\n path = BASE_DIR + patient + '/' + study + '/' # path to this study\n study_data[phase].loc[i] = [path, len(os.listdir(path)), label] # add new row\n i+=1\n return study_data\n\nclass ImageDataset(Dataset):\n \"\"\"training dataset.\"\"\"\n\n def __init__(self, df, transform=None):\n \"\"\"\n Args:\n df (pd.DataFrame): a pandas DataFrame with image path and labels.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.df = df\n self.transform = transform\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n study_path = self.df.iloc[idx, 0]\n count = self.df.iloc[idx, 1]\n images = []\n for i in range(count):\n image = pil_loader(study_path + 'image%s.png' % (i+1))\n images.append(self.transform(image))\n images = torch.stack(images)\n label = self.df.iloc[idx, 2]\n sample = {'images': images, 'label': label}\n return sample\n\ndef get_dataloaders(data, batch_size=8, study_level=False):\n '''\n Returns dataloader pipeline with data augmentation\n '''\n if mura_config['train']['augmentation']:\n data_transforms = {\n 'train': transforms.Compose([\n transforms.Resize((mura_config['data']['image_height'], mura_config['data']['image_width'])),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(10),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n else:\n data_transforms = {\n 'train': transforms.Compose([\n transforms.Resize((mura_config['data']['image_height'], mura_config['data']['image_width'])),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n image_datasets = {x: ImageDataset(data[x], transform=data_transforms[x]) for x in data_cat}\n dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in data_cat}\n return dataloaders\n\nif __name__=='main':\n pass\n" ]
[ [ "torch.utils.data.DataLoader", "torch.stack", "pandas.DataFrame" ] ]
ruidongjr/Aldi
[ "0d2dad1ab180abb59bee15d9e5e851e4de4e8cd5" ]
[ "libraries/deep_sort/deep_sort/deep/feature_extractor.py" ]
[ "import torch\nimport torchvision.transforms as transforms\nimport numpy as np\nimport cv2\nimport logging\n\nfrom .model import Net\n\nclass Extractor(object):\n def __init__(self, model_path, use_cuda=True):\n self.net = Net(reid=True)\n self.device = \"cuda\" if torch.cuda.is_available() and use_cuda else \"cpu\"\n state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']\n self.net.load_state_dict(state_dict)\n logger = logging.getLogger(\"root.tracker\")\n logger.info(\"Loading weights from {}... Done!\".format(model_path))\n self.net.to(self.device)\n self.size = (64, 128)\n self.norm = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ])\n \n\n\n def _preprocess(self, im_crops):\n \"\"\"\n TODO:\n 1. to float with scale from 0 to 1\n 2. resize to (64, 128) as Market1501 dataset did\n 3. concatenate to a numpy array\n 3. to torch Tensor\n 4. normalize\n \"\"\"\n def _resize(im, size):\n return cv2.resize(im.astype(np.float32)/255., size)\n\n im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()\n return im_batch\n\n\n def __call__(self, im_crops):\n im_batch = self._preprocess(im_crops)\n with torch.no_grad():\n im_batch = im_batch.to(self.device)\n features = self.net(im_batch)\n return features.cpu().numpy()\n\n\nif __name__ == '__main__':\n img = cv2.imread(\"demo.jpg\")[:,:,(2,1,0)]\n extr = Extractor(\"checkpoint/ckpt.t7\")\n feature = extr(img)\n" ]
[ [ "torch.no_grad", "torch.cuda.is_available", "torch.load" ] ]
aalbersk/DeepRec
[ "f673a950780959b44dcda99398880a1d883ab338" ]
[ "sparse_operation_kit/unit_test/test_scripts/tf1/test_sparse_emb_demo.py" ]
[ "\"\"\"\n Copyright (c) 2021, NVIDIA CORPORATION.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport argparse\nimport sys, os\nsys.path.append(os.path.abspath(os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../../\")))\n# os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\nimport sparse_operation_kit as sok\nimport tensorflow as tf\nimport utils\nfrom sparse_models import SOKDemo, TFDemo\nfrom test_dense_emb_demo import check_saved_embedding_variables\nimport strategy_wrapper\nimport numpy as np\n\n\ndef get_sok_results(args, init_tensors, *random_samples):\n if args.distributed_tool == \"onedevice\":\n strategy = strategy_wrapper.OneDeviceStrategy()\n elif args.distributed_tool == \"horovod\":\n import horovod.tensorflow as hvd\n hvd.init()\n strategy = strategy_wrapper.HorovodStrategy()\n else:\n raise ValueError(f\"{args.distributed_tool} is not supported.\")\n\n with strategy.scope():\n sok_init_op = sok.Init(global_batch_size=args.global_batch_size)\n\n sok_sparse_demo = SOKDemo(max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,\n embedding_vec_size=args.embedding_vec_size,\n combiner=args.combiner,\n slot_num=args.slot_num,\n max_nnz=args.max_nnz,\n use_hashtable=args.use_hashtable,\n num_of_dense_layers=0)\n \n emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)\n dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)\n\n sok_saver = sok.Saver()\n restore_op = list()\n for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):\n control_inputs = [restore_op[-1]] if restore_op else None\n with tf.control_dependencies(control_inputs):\n if args.restore_params:\n filepath = r\"./embedding_variables\"\n op = sok_saver.restore_from_file(embedding_layer.embedding_variable, filepath)\n else:\n op = sok_saver.load_embedding_values(embedding_layer.embedding_variable, init_tensors[i])\n restore_op.append(op)\n\n loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=\"none\")\n def _replica_loss(labels, logits):\n loss = loss_fn(labels, logits)\n return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)\n\n def _train_step(inputs, labels, training):\n def _step_fn(inputs, labels):\n logit, embedding_vector = sok_sparse_demo(inputs, training=training)\n loss = _replica_loss(labels, logit)\n emb_var, other_var = sok.split_embedding_variable_from_others(sok_sparse_demo.trainable_variables)\n grads = tf.gradients(loss, emb_var + other_var, colocate_gradients_with_ops=True,\n unconnected_gradients=tf.UnconnectedGradients.NONE)\n emb_grads, other_grads = grads[:len(emb_var)], grads[len(emb_var):]\n if \"plugin\" in args.optimizer:\n emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))\n else:\n with sok.OptimizerScope(emb_var):\n emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))\n with tf.control_dependencies([*emb_grads]):\n # in case NCCL runs concurrently via SOK and horovod\n other_grads = strategy.reduce(\"sum\", other_grads)\n other_train_op = 
dense_opt.apply_gradients(zip(other_grads, other_var))\n\n with tf.control_dependencies([emb_train_op, other_train_op]):\n total_loss = strategy.reduce(\"sum\", loss)\n total_loss = tf.identity(total_loss)\n return total_loss, embedding_vector\n return strategy.run(_step_fn, inputs, labels)\n\n replica_batch_size = args.global_batch_size // args.gpu_num\n dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size,\n to_sparse_tensor=True, repeat=1)\n train_iterator = dataset.make_initializable_iterator()\n iterator_init = train_iterator.initializer\n\n inputs, labels = train_iterator.get_next()\n graph_results = _train_step(inputs, labels, training=True)\n \n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n if \"plugin\" in args.optimizer:\n init_op = tf.group(init_op, emb_opt.initializer)\n\n save_op = list()\n for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):\n control_inputs = [save_op[-1]] if save_op else None\n with tf.control_dependencies(control_inputs):\n if args.save_params:\n filepath = r\"./embedding_variables/\"\n utils.try_make_dirs(filepath)\n op = sok_saver.dump_to_file(embedding_layer.embedding_variable, filepath)\n else:\n op = tf.constant(1.0)\n save_op.append(op)\n\n sok_results = list()\n\n with tf.Session() as sess:\n sess.run(sok_init_op)\n sess.run([init_op, iterator_init])\n sess.run(restore_op)\n sess.graph.finalize()\n\n for step in range(args.iter_num):\n loss_v, emb_vector_v = sess.run([*graph_results])\n print(\"*\" * 80)\n print(f\"Step: {step}, loss: {loss_v}, embedding_vector:\\n{emb_vector_v}\")\n sok_results.append(emb_vector_v)\n\n sess.run(save_op)\n\n name = list()\n for embedding_layer in sok_sparse_demo.embedding_layers:\n name.append(embedding_layer.embedding_variable.m_var_name)\n \n return sok_results, name\n\ndef get_tf_results(args, init_tensors, *random_samples):\n graph = tf.Graph()\n with graph.as_default():\n tf_sparse_demo = TFDemo(vocabulary_size=args.max_vocabulary_size_per_gpu * args.gpu_num,\n embedding_vec_size=args.embedding_vec_size,\n combiner=args.combiner,\n slot_num=args.slot_num,\n max_nnz=args.max_nnz,\n use_hashtable=args.use_hashtable,\n num_of_dense_layers=0)\n \n optimizer = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)\n\n loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n def _train_step(inputs, labels, training):\n logit, embedding_vector = tf_sparse_demo(inputs, training=training)\n loss = loss_fn(labels, logit)\n grads = tf.gradients(loss, tf_sparse_demo.trainable_variables,\n colocate_gradients_with_ops=True,\n unconnected_gradients=tf.UnconnectedGradients.NONE)\n train_op = optimizer.apply_gradients(zip(grads, tf_sparse_demo.trainable_variables))\n with tf.control_dependencies([train_op]):\n loss = tf.identity(loss)\n return loss, embedding_vector\n\n\n dataset = utils.tf_dataset(*random_samples, batchsize=args.global_batch_size,\n to_sparse_tensor=True, repeat=1)\n train_iterator = dataset.make_initializable_iterator()\n iterator_init = train_iterator.initializer\n\n inputs, labels = train_iterator.get_next()\n graph_results = _train_step(inputs, labels, training=True)\n\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n restore_op = list()\n for i, embedding_weight in enumerate(tf_sparse_demo.embedding_weights):\n restore_op.append(embedding_weight.assign(tf.concat(init_tensors[i], axis=0)))\n\n emb_values = list()\n for embedding_weight in 
tf_sparse_demo.embedding_weights:\n if args.save_params:\n filepath = r\"./embedding_variables/\"\n utils.try_make_dirs(filepath)\n emb_values.append(embedding_weight.read_value())\n else:\n emb_values = tf.constant(1.0)\n\n tf_results = list()\n with tf.Session(graph=graph) as sess:\n sess.run([init_op, iterator_init])\n sess.run(restore_op)\n sess.graph.finalize()\n\n for step in range(args.iter_num):\n loss_v, emb_vector_v = sess.run([*graph_results])\n print(\"*\" * 80)\n print(f\"step: {step}, loss: {loss_v}, embedding_vector:\\n{emb_vector_v}\")\n tf_results.append(emb_vector_v)\n\n emb_values_v = sess.run(emb_values)\n if args.save_params:\n for i, value in enumerate(emb_values_v):\n utils.save_to_file(os.path.join(filepath, r\"tf_variable_\" + str(i) + r\".file\"),\n value)\n \n name = list()\n for embedding_weight in tf_sparse_demo.embedding_weights:\n name.append(embedding_weight.name)\n\n return tf_results, name\n\ndef compare_sparse_emb_sok_with_tf(args):\n if args.global_batch_size % args.gpu_num != 0:\n raise ValueError(f\"global_batch_size: {args.global_batch_size} is not divisible \"\n f\"by gpu_num: {args.gpu_num}\")\n\n if args.use_hashtable:\n vocabulary_size = args.max_vocabulary_size_per_gpu * args.gpu_num\n else:\n vocabulary_size = args.max_vocabulary_size_per_gpu\n\n if args.generate_new_datas:\n replica_batch_size = args.global_batch_size // args.gpu_num\n random_samples = utils.generate_random_samples(num_of_samples=replica_batch_size * args.iter_num,\n vocabulary_size=vocabulary_size,\n slot_num=sum(args.slot_num),\n max_nnz=args.max_nnz,\n use_sparse_mask=True)\n utils.save_to_file(r\"./random_samples_\" + str(args.rank_idx) + r\".file\", *random_samples)\n else:\n random_samples = utils.restore_from_file(r\"./random_samples_\" + str(args.rank_idx) + r\".file\")\n\n if args.restore_params:\n filepath = r\"./embedding_variables\"\n # because we already checked the variable consistency when saving\n # so that we can directly use TF Variable file to initialize\n # TF's Variable and SOK's Variable\n init_tensors = list()\n for i in range(len(args.slot_num)):\n tf_values_filename = os.path.join(filepath, r\"tf_variable_\" + str(i) + r\".file\")\n init_tensors.append(utils.restore_from_file(tf_values_filename))\n else:\n init_tensors = list()\n for i in range(len(args.slot_num)):\n init_tensors.append(utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,\n embedding_vec_size=args.embedding_vec_size[i],\n num=args.gpu_num))\n sok_results, variable_names = get_sok_results(args, init_tensors, *random_samples)\n utils.save_to_file(r\"./sok_embedding_vectors_\" + str(args.rank_idx) + r\".file\", *sok_results)\n\n if args.rank_idx != 0:\n return\n\n # aggregate dataset from different worker\n dataset_filenames = [r\"./random_samples_\" + str(rank_idx) + r\".file\"\n for rank_idx in range(args.rank_size)]\n random_samples_total = [list() for _ in range(args.iter_num)]\n random_labels_total = [list() for _ in range(args.iter_num)]\n local_batch_size = args.global_batch_size // args.gpu_num\n for rank_idx in range(args.rank_size):\n samples, labels = utils.restore_from_file(dataset_filenames[rank_idx])\n for i in range(args.iter_num):\n random_samples_total[i].extend(samples[i * local_batch_size : (i + 1) * local_batch_size])\n random_labels_total[i].extend(labels[i * local_batch_size : (i + 1) * local_batch_size])\n random_samples_total = np.concatenate(random_samples_total, axis=0)\n random_labels_total = np.concatenate(random_labels_total, 
axis=0)\n\n tf_results, _ = get_tf_results(args, init_tensors, random_samples_total, random_labels_total)\n\n # aggregate sok forward results from different worker\n sok_results_filenames = [r\"./sok_embedding_vectors_\" + str(rank_idx) + r\".file\"\n for rank_idx in range(args.rank_size)]\n sok_results_total = list()\n for filename in sok_results_filenames:\n sok_results = utils.restore_from_file(filename)\n sok_results_total.append(sok_results)\n\n if len(sok_results_total[0]) != len(tf_results):\n raise ValueError(\"The length of sok results is not equal to that of tensorflow.\")\n if len(sok_results) != args.iter_num:\n raise ValueError(\"The length of embedding vectors: %d is not equal to iteration number: %d.\"\n %(len(sok_results), args.iter_num))\n\n rtol, atol = 1e-3, 1e-3\n if args.restore_params:\n rtol, atol = rtol * 10, atol * 10\n if args.distributed_tool == \"horovod\":\n rtol, atol = rtol * 10, atol * 10\n for i in range(args.iter_num):\n sok_vector = np.concatenate([sok_results_total[rank_idx][i]\n for rank_idx in range(args.rank_size)], axis=0)\n allclose = np.allclose(sok_vector, tf_results[i], rtol=rtol, atol=atol)\n if not allclose:\n raise ValueError(f\"\\n{sok_vector} \\nis not near to \\n{tf_results[i]} \\nat rtol={rtol}, atol={atol}\")\n\n print(f\"\\n[INFO]: For {len(args.slot_num)} Sparse Embedding layer, using {args.gpu_num} GPUs + {args.optimizer} optimizer, \"\n f\"using hashtable? {args.use_hashtable}, combiner = {args.combiner}, the embedding vectors\"\n f\" obtained from sok and tf are consistent for {args.iter_num} iterations.\")\n\n if args.save_params:\n check_saved_embedding_variables(args, variable_names,\n use_hashtable=args.use_hashtable, gpu_num=args.gpu_num)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--gpu_num\", type=int, required=False, default=1)\n parser.add_argument(\"--distributed_tool\", type=str, required=False, \n choices=[\"horovod\", \"onedevice\"], default=\"onedevice\")\n parser.add_argument(\"--iter_num\", type=int, required=False, default=50)\n parser.add_argument(\"--max_vocabulary_size_per_gpu\", type=int,\n required=False, default=1024)\n parser.add_argument(\"--combiner\", type=str, required=False, default=\"sum\",\n choices=[\"sum\", \"mean\"])\n parser.add_argument(\"--slot_num\", type=int, nargs=\"+\",\n help=\"the number of feature fileds\",\n required=False, default=1)\n parser.add_argument(\"--max_nnz\", type=int,\n help=\"the maximum of valid inputs\",\n required=False, default=1)\n parser.add_argument(\"--embedding_vec_size\", type=int, nargs=\"+\",\n required=False, default=1)\n parser.add_argument(\"--global_batch_size\", type=int, required=False,\n default=16)\n parser.add_argument(\"--optimizer\", type=str, required=False, \n default=\"adam\", choices=[\"plugin_adam\", \"adam\", \"sgd\", \"compat_adam\"])\n parser.add_argument(\"--generate_new_datas\", type=int, choices=[0, 1],\n required=False, default=1)\n parser.add_argument(\"--save_params\", type=int, choices=[0, 1],\n required=False, default=1)\n parser.add_argument(\"--restore_params\", type=int, choices=[0, 1],\n required=False, default=0)\n parser.add_argument(\"--use_hashtable\", type=int, choices=[0, 1],\n required=False, default=1)\n\n args = parser.parse_args()\n\n args.generate_new_datas = True if args.generate_new_datas == 1 else False\n args.save_params = True if args.save_params == 1 else False\n args.restore_params = True if args.restore_params == 1 else False\n args.use_hashtable = True if 
args.use_hashtable == 1 else False\n\n if (args.distributed_tool == \"onedevice\" and args.gpu_num != 1):\n raise ValueError(f\"When 'onedevice' is used as the distributed_tool, \"\n f\"gpu_num must be 1, but got {args.gpu_num}\")\n\n if args.distributed_tool == \"onedevice\":\n available_gpus = \",\".join(map(str, range(args.gpu_num)))\n rank_size = args.gpu_num\n rank_idx = 0\n else:\n # gpu_num will be ignored.\n rank_size = os.getenv(\"OMPI_COMM_WORLD_SIZE\")\n if rank_size is None:\n raise ValueError(f\"When distributed_tool is set to {args.distributed_tool}, \"\n \"mpiexec / mpirun must be used to launch this program.\")\n rank_size = int(rank_size)\n rank_idx = int(os.getenv(\"OMPI_COMM_WORLD_RANK\"))\n\n available_gpus = str(rank_idx)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = available_gpus\n\n args.rank_size = rank_size\n args.rank_idx = rank_idx\n args.gpu_num = rank_size\n\n compare_sparse_emb_sok_with_tf(args)" ]
[ [ "numpy.allclose", "tensorflow.global_variables_initializer", "tensorflow.Graph", "tensorflow.nn.compute_average_loss", "tensorflow.constant", "tensorflow.keras.losses.BinaryCrossentropy", "tensorflow.gradients", "tensorflow.Session", "tensorflow.concat", "tensorflow.identity", "numpy.concatenate", "tensorflow.group", "tensorflow.local_variables_initializer", "tensorflow.control_dependencies" ] ]
richardrl/graphics
[ "c05ee5b947bc462881968b4a109a9ba59ff8c6a8" ]
[ "tensorflow_graphics/rendering/opengl/math.py" ]
[ "# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module implements math routines used by OpenGL.\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow_graphics.geometry.transformation import look_at\nfrom tensorflow_graphics.math.interpolation import weighted\nfrom tensorflow_graphics.rendering.camera import perspective\nfrom tensorflow_graphics.util import asserts\nfrom tensorflow_graphics.util import export_api\nfrom tensorflow_graphics.util import shape\n\n\ndef model_to_eye(point_model_space,\n camera_position,\n look_at_point,\n up_vector,\n name=\"model_to_eye\"):\n \"\"\"Transforms points from model to eye coordinates.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible.\n\n Args:\n point_model_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D points in model space.\n camera_position: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D position of the camera.\n look_at_point: A tensor of shape `[A1, ..., An, 3]`, with the last dimension\n storing the position where the camera is looking at.\n up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n defines the up vector of the camera.\n name: A name for this op. 
Defaults to \"model_to_eye\".\n\n Raises:\n ValueError: if the all the inputs are not of the same shape, or if any input\n of of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing `point_model_space` in eye\n coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_model_space = tf.convert_to_tensor(value=point_model_space)\n camera_position = tf.convert_to_tensor(value=camera_position)\n look_at_point = tf.convert_to_tensor(value=look_at_point)\n up_vector = tf.convert_to_tensor(value=up_vector)\n\n shape.check_static(\n tensor=point_model_space,\n tensor_name=\"point_model_space\",\n has_dim_equals=(-1, 3))\n shape.compare_batch_dimensions(\n tensors=(point_model_space, camera_position),\n last_axes=-2,\n tensor_names=(\"point_model_space\", \"camera_position\"),\n broadcast_compatible=True)\n\n model_to_eye_matrix = look_at.right_handed(camera_position, look_at_point,\n up_vector)\n batch_shape = tf.shape(input=point_model_space)[:-1]\n one = tf.ones(\n shape=tf.concat((batch_shape, (1,)), axis=-1),\n dtype=point_model_space.dtype)\n point_model_space = tf.concat((point_model_space, one), axis=-1)\n point_model_space = tf.expand_dims(point_model_space, axis=-1)\n res = tf.squeeze(tf.matmul(model_to_eye_matrix, point_model_space), axis=-1)\n return res[..., :-1]\n\n\ndef eye_to_clip(point_eye_space,\n vertical_field_of_view,\n aspect_ratio,\n near,\n far,\n name=\"eye_to_clip\"):\n \"\"\"Transforms points from eye to clip space.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible.\n\n Args:\n point_eye_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D points in eye coordinates.\n vertical_field_of_view: A tensor of shape `[A1, ..., An, 1]`, where the last\n dimension represents the vertical field of view of the frustum. Note that\n values for `vertical_field_of_view` must be in the range ]0,pi[.\n aspect_ratio: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n stores the width over height ratio of the frustum. Note that values for\n `aspect_ratio` must be non-negative.\n near: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n captures the distance between the viewer and the near clipping plane. Note\n that values for `near` must be non-negative.\n far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension captures\n the distance between the viewer and the far clipping plane. Note that\n values for `far` must be non-negative.\n name: A name for this op. 
Defaults to \"eye_to_clip\".\n\n Raises:\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 4]`, containing `point_eye_space` in\n homogeneous clip coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_eye_space = tf.convert_to_tensor(value=point_eye_space)\n vertical_field_of_view = tf.convert_to_tensor(value=vertical_field_of_view)\n aspect_ratio = tf.convert_to_tensor(value=aspect_ratio)\n near = tf.convert_to_tensor(value=near)\n far = tf.convert_to_tensor(value=far)\n\n shape.check_static(\n tensor=point_eye_space,\n tensor_name=\"point_eye_space\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=vertical_field_of_view,\n tensor_name=\"vertical_field_of_view\",\n has_dim_equals=(-1, 1))\n shape.check_static(\n tensor=aspect_ratio, tensor_name=\"aspect_ratio\", has_dim_equals=(-1, 1))\n shape.check_static(tensor=near, tensor_name=\"near\", has_dim_equals=(-1, 1))\n shape.check_static(tensor=far, tensor_name=\"far\", has_dim_equals=(-1, 1))\n shape.compare_batch_dimensions(\n tensors=(point_eye_space, vertical_field_of_view, aspect_ratio, near,\n far),\n last_axes=-2,\n tensor_names=(\"point_eye_space\", \"vertical_field_of_view\",\n \"aspect_ratio\", \"near\", \"far\"),\n broadcast_compatible=True)\n\n perspective_matrix = perspective.right_handed(vertical_field_of_view,\n aspect_ratio, near, far)\n batch_shape = tf.shape(input=point_eye_space)[:-1]\n one = tf.ones(\n shape=tf.concat((batch_shape, (1,)), axis=-1),\n dtype=point_eye_space.dtype)\n point_eye_space = tf.concat((point_eye_space, one), axis=-1)\n point_eye_space = tf.expand_dims(point_eye_space, axis=-1)\n\n return tf.squeeze(tf.matmul(perspective_matrix, point_eye_space), axis=-1)\n\n\ndef clip_to_ndc(point_clip_space, name=\"clip_to_ndc\"):\n \"\"\"Transforms points from clip to normalized device coordinates (ndc).\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n point_clip_space: A tensor of shape `[A1, ..., An, 4]`, where the last\n dimension represents points in clip space.\n name: A name for this op. 
Defaults to \"clip_to_ndc\".\n\n Raises:\n ValueError: If `point_clip_space` is not of size 4 in its last dimension.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing `point_clip_space` in\n normalized device coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_clip_space = tf.convert_to_tensor(value=point_clip_space)\n\n shape.check_static(\n tensor=point_clip_space,\n tensor_name=\"point_clip_space\",\n has_dim_equals=(-1, 4))\n\n w = point_clip_space[..., -1:]\n return point_clip_space[..., :3] / w\n\n\ndef ndc_to_screen(point_ndc_space,\n lower_left_corner,\n screen_dimensions,\n near,\n far,\n name=\"ndc_to_screen\"):\n \"\"\"Transforms points from normalized device coordinates to screen coordinates.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible between `point_ndc_space` and the other variables.\n\n Args:\n point_ndc_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents points in normalized device coordinates.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n near: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n captures the distance between the viewer and the near clipping plane. Note\n that values for `near` must be non-negative.\n far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n captures the distance between the viewer and the far clipping plane. Note\n that values for `far` must be greater than those of `near`.\n name: A name for this op. 
Defaults to \"ndc_to_screen\".\n\n Raises:\n InvalidArgumentError: if any input contains data not in the specified range\n of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing `point_ndc_space` in\n screen coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_ndc_space = tf.convert_to_tensor(value=point_ndc_space)\n lower_left_corner = tf.convert_to_tensor(value=lower_left_corner)\n screen_dimensions = tf.convert_to_tensor(value=screen_dimensions)\n near = tf.convert_to_tensor(value=near)\n far = tf.convert_to_tensor(value=far)\n\n shape.check_static(\n tensor=point_ndc_space,\n tensor_name=\"point_ndc_space\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=lower_left_corner,\n tensor_name=\"lower_left_corner\",\n has_dim_equals=(-1, 2))\n shape.check_static(\n tensor=screen_dimensions,\n tensor_name=\"screen_dimensions\",\n has_dim_equals=(-1, 2))\n shape.check_static(tensor=near, tensor_name=\"near\", has_dim_equals=(-1, 1))\n shape.check_static(tensor=far, tensor_name=\"far\", has_dim_equals=(-1, 1))\n\n shape.compare_batch_dimensions(\n tensors=(lower_left_corner, screen_dimensions, near, far),\n last_axes=-2,\n tensor_names=(\"lower_left_corner\", \"screen_dimensions\", \"near\", \"far\"),\n broadcast_compatible=False)\n shape.compare_batch_dimensions(\n tensors=(point_ndc_space, near),\n last_axes=-2,\n tensor_names=(\"point_ndc_space\", \"near\"),\n broadcast_compatible=True)\n\n screen_dimensions = asserts.assert_all_above(\n screen_dimensions, 0.0, open_bound=True)\n near = asserts.assert_all_above(near, 0.0, open_bound=True)\n far = asserts.assert_all_above(far, near, open_bound=True)\n\n ndc_to_screen_factor = tf.concat(\n (screen_dimensions, far - near), axis=-1) / 2.0\n screen_center = tf.concat(\n (lower_left_corner + screen_dimensions / 2.0, (near + far) / 2.0),\n axis=-1)\n return ndc_to_screen_factor * point_ndc_space + screen_center\n\n\ndef model_to_screen(point_model_space,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner=(0.0, 0.0),\n name=\"model_to_screen\"):\n \"\"\"Transforms points from model to screen coordinates.\n\n Note:\n Please refer to http://www.songho.ca/opengl/gl_transform.html for an\n in-depth review of this pipeline.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible.\n\n Args:\n point_model_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D points in model space.\n model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from model to eye\n coordinates.\n perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from eye to clip\n coordinates.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n name: A name for this op. 
Defaults to \"model_to_screen\".\n\n Raises:\n InvalidArgumentError: if any input contains data not in the specified range\n of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and\n `[A1, ..., An, 1]`, where the first tensor containing the projection of\n `point_model_space` in screen coordinates, and the second represents the 'w'\n component of `point_model_space` in clip space.\n \"\"\"\n with tf.name_scope(name):\n point_model_space = tf.convert_to_tensor(value=point_model_space)\n model_to_eye_matrix = tf.convert_to_tensor(value=model_to_eye_matrix)\n perspective_matrix = tf.convert_to_tensor(value=perspective_matrix)\n\n shape.check_static(\n tensor=point_model_space,\n tensor_name=\"point_model_space\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=model_to_eye_matrix,\n tensor_name=\"model_to_eye_matrix\",\n has_dim_equals=((-1, 4), (-2, 4)))\n shape.check_static(\n tensor=perspective_matrix,\n tensor_name=\"perspective_matrix\",\n has_dim_equals=((-1, 4), (-2, 4)))\n shape.compare_batch_dimensions(\n tensors=(point_model_space, model_to_eye_matrix, perspective_matrix),\n last_axes=(-2, -3, -3),\n tensor_names=(\"point_model_space\", \"model_to_eye_matrix\",\n \"perspective_matrix\"),\n broadcast_compatible=True)\n\n batch_shape = tf.shape(input=point_model_space)[:-1]\n one = tf.ones(\n shape=tf.concat((batch_shape, (1,)), axis=-1),\n dtype=point_model_space.dtype)\n point_model_space = tf.concat((point_model_space, one), axis=-1)\n point_model_space = tf.expand_dims(point_model_space, axis=-1)\n\n view_projection_matrix = tf.linalg.matmul(perspective_matrix,\n model_to_eye_matrix)\n\n _, _, near, far = perspective.parameters_from_right_handed(\n perspective_matrix)\n\n point_clip_space = tf.squeeze(\n tf.matmul(view_projection_matrix, point_model_space), axis=-1)\n point_ndc_space = clip_to_ndc(point_clip_space)\n point_screen_space = ndc_to_screen(point_ndc_space, lower_left_corner,\n screen_dimensions, near, far)\n return point_screen_space, point_clip_space[..., 3:4]\n\n\ndef perspective_correct_barycentrics(triangle_vertices_model_space,\n pixel_position,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner=(0.0, 0.0),\n name=\"perspective_correct_barycentrics\"):\n \"\"\"Computes perspective correct barycentrics.\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n triangle_vertices_model_space: A tensor of shape `[A1, ..., An, 3, 3]`,\n where the last dimension represents the vertices of a triangle in model\n space.\n pixel_position: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension stores the position (in pixels) where the interpolation is\n requested.\n model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from model to eye\n coordinates.\n perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from eye to clip\n coordinates.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n name: A name for this op. 
Defaults to \"perspective_correct_barycentrics\".\n\n Raises:\n InvalidArgumentError: if any input contains data not in the specified range\n of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing perspective correct\n barycentric coordinates.\n \"\"\"\n with tf.name_scope(name):\n pixel_position = tf.convert_to_tensor(value=pixel_position)\n triangle_vertices_model_space = tf.convert_to_tensor(\n value=triangle_vertices_model_space)\n shape.check_static(\n tensor=pixel_position,\n tensor_name=\"pixel_position\",\n has_dim_equals=(-1, 2))\n shape.check_static(\n tensor=triangle_vertices_model_space,\n tensor_name=\"triangle_vertices_model_space\",\n has_dim_equals=((-2, 3), (-1, 3)))\n\n lower_left_corner = tf.convert_to_tensor(value=lower_left_corner)\n screen_dimensions = tf.convert_to_tensor(value=screen_dimensions)\n lower_left_corner = shape.add_batch_dimensions(\n lower_left_corner,\n \"lower_left_corner\",\n model_to_eye_matrix.shape[:-2],\n last_axis=-2)\n screen_dimensions = shape.add_batch_dimensions(\n screen_dimensions,\n \"screen_dimensions\",\n model_to_eye_matrix.shape[:-2],\n last_axis=-2)\n\n vertices_screen, vertices_w = model_to_screen(triangle_vertices_model_space,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner)\n vertices_w = tf.squeeze(vertices_w, axis=-1)\n pixel_position = tf.expand_dims(pixel_position, axis=-2)\n barycentric_coordinates, _ = weighted.get_barycentric_coordinates(\n vertices_screen[..., :2], pixel_position)\n barycentric_coordinates = tf.squeeze(barycentric_coordinates, axis=-2)\n coeffs = barycentric_coordinates / vertices_w\n return tf.linalg.normalize(coeffs, ord=1, axis=-1)[0]\n\n\ndef interpolate_attributes(attribute,\n barycentric,\n name=\"interpolate_attributes\"):\n \"\"\"Interpolates attributes using barycentric weights.\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n attribute: A tensor of shape `[A1, ..., An, 3, B]`, where the last dimension\n stores a per-vertex `B`-dimensional attribute.\n barycentric: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n contains barycentric coordinates.\n name: A name for this op. 
Defaults to \"interpolate_attributes\".\n\n Returns:\n A tensor of shape `[A1, ..., An, B]`, containing interpolated attributes.\n \"\"\"\n with tf.name_scope(name):\n attribute = tf.convert_to_tensor(value=attribute)\n barycentric = tf.convert_to_tensor(value=barycentric)\n\n shape.check_static(\n tensor=attribute, tensor_name=\"attribute\", has_dim_equals=(-2, 3))\n shape.check_static(\n tensor=barycentric, tensor_name=\"barycentric\", has_dim_equals=(-1, 3))\n shape.compare_batch_dimensions(\n tensors=(attribute, barycentric),\n last_axes=(-2, -1),\n tensor_names=(\"attribute\", \"barycentric\"),\n broadcast_compatible=True)\n barycentric = asserts.assert_normalized(barycentric, order=1)\n return tf.reduce_sum(\n input_tensor=tf.expand_dims(barycentric, axis=-1) * attribute, axis=-2)\n\n\ndef perspective_correct_interpolation(triangle_vertices_model_space,\n attribute,\n pixel_position,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner=(0.0, 0.0),\n name=\"perspective_correct_interpolation\"):\n \"\"\"Returns perspective corrected interpolation of attributes over triangles.\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n triangle_vertices_model_space: A tensor of shape `[A1, ..., An, 3, 3]`,\n where the last dimension represents the vertices of a triangle in model\n space.\n attribute: A tensor of shape `[A1, ..., An, 3, B]`, where the last dimension\n stores a per-vertex `B`-dimensional attribute.\n pixel_position: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension stores the position (in pixels) where the interpolation is\n requested.\n model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from model to eye\n coordinates.\n perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from eye to clip\n coordinates.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n name: A name for this op. Defaults to \"perspective_correct_interpolation\".\n\n Raises:\n tf.errors.InvalidArgumentError: if any input contains data not in the\n specified range of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, B]`, containing interpolated attributes.\n \"\"\"\n with tf.name_scope(name):\n barycentric = perspective_correct_barycentrics(\n triangle_vertices_model_space, pixel_position, model_to_eye_matrix,\n perspective_matrix, screen_dimensions, lower_left_corner)\n return interpolate_attributes(attribute, barycentric)\n\n\n# API contains all public functions and classes.\n__all__ = export_api.get_functions_and_classes()\n" ]
[ [ "tensorflow.shape", "tensorflow.expand_dims", "tensorflow.matmul", "tensorflow.squeeze", "tensorflow.linalg.matmul", "tensorflow.name_scope", "tensorflow.convert_to_tensor", "tensorflow.concat", "tensorflow.linalg.normalize" ] ]
Anthuang/bdd100k
[ "b7e1781317784317e4e715ab325515ade73978a9" ]
[ "bdd100k/vis/viewer.py" ]
[ "\"\"\"An offline label visualizer for BDD100K file.\n\nWorks for 2D / 3D bounding box, segmentation masks, etc.\n\"\"\"\n\nimport argparse\nimport concurrent.futures\nfrom typing import Dict\n\nimport numpy as np\nfrom scalabel.common.parallel import NPROC\nfrom scalabel.common.typing import NDArrayF64\nfrom scalabel.label.typing import Label\nfrom scalabel.vis.controller import (\n ControllerConfig,\n DisplayConfig,\n ViewController,\n)\nfrom scalabel.vis.label import LabelViewer, UIConfig\n\nfrom ..label.label import drivables, labels, lane_categories\n\n\nclass LabelViewerBDD100K(LabelViewer):\n \"\"\"Basic class for viewing BDD100K labels.\"\"\"\n\n def __init__(self, ui_cfg: UIConfig) -> None:\n \"\"\"Initializer.\"\"\"\n super().__init__(ui_cfg)\n self.colors: Dict[str, NDArrayF64] = {\n label.name: np.array(label.color)\n for label in labels\n if not label.hasInstances\n }\n self.colors.update(\n {drivable.name: np.array(drivable.color) for drivable in drivables}\n )\n self.colors.update(\n {lane.name: np.array(lane.color) for lane in lane_categories}\n )\n\n def _get_label_color(self, label: Label) -> NDArrayF64:\n \"\"\"Get color by category and id.\"\"\"\n if label.category in self.colors:\n return self.colors[label.category] / 255.0\n return super()._get_label_color(label)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Use argparse to get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n \"\"\"\nInterface keymap:\n - n / p: Show next or previous image\n - Space: Start / stop animation\n - t: Toggle 2D / 3D bounding box (if avaliable)\n - a: Toggle the display of the attribute tags on boxes or polygons.\n - c: Toggle the display of polygon vertices.\n - Up: Increase the size of polygon vertices.\n - Down: Decrease the size of polygon vertices.\nExport images:\n - add `-o {dir}` tag when runing.\n \"\"\"\n )\n parser.add_argument(\"-i\", \"--image-dir\", help=\"image directory\")\n parser.add_argument(\n \"-l\",\n \"--labels\",\n required=False,\n default=\"labels.json\",\n help=\"Path to the json file\",\n type=str,\n )\n parser.add_argument(\n \"--height\",\n type=int,\n default=720,\n help=\"Height of the image (px)\",\n )\n parser.add_argument(\n \"--width\",\n type=int,\n default=1280,\n help=\"Width of the image (px)\",\n )\n parser.add_argument(\n \"-s\",\n \"--scale\",\n type=float,\n default=1.0,\n help=\"Scale up factor for annotation factor. \"\n \"Useful when producing visualization as thumbnails.\",\n )\n parser.add_argument(\n \"--no-attr\",\n action=\"store_true\",\n default=False,\n help=\"Do not show attributes\",\n )\n parser.add_argument(\n \"--no-box3d\",\n action=\"store_true\",\n default=True,\n help=\"Do not show 3D bounding boxes\",\n )\n parser.add_argument(\n \"--no-tags\",\n action=\"store_true\",\n default=False,\n help=\"Do not show tags on boxes or polygons\",\n )\n parser.add_argument(\n \"--no-vertices\",\n action=\"store_true\",\n default=False,\n help=\"Do not show vertices\",\n )\n parser.add_argument(\n \"-o\",\n \"--output-dir\",\n required=False,\n default=None,\n type=str,\n help=\"output image directory with label visualization. \"\n \"If it is set, the images will be written to the \"\n \"output folder instead of being displayed \"\n \"interactively.\",\n )\n parser.add_argument(\n \"--range-begin\",\n type=int,\n default=0,\n help=\"from which frame to visualize. Default is 0.\",\n )\n parser.add_argument(\n \"--range-end\",\n type=int,\n default=-1,\n help=\"up to which frame to visualize. 
Default is -1, \"\n \"indicating loading all frames for visualizatoin.\",\n )\n parser.add_argument(\n \"--nproc\",\n type=int,\n default=NPROC,\n help=\"number of processes for json loading\",\n )\n\n args = parser.parse_args()\n\n return args\n\n\ndef main() -> None:\n \"\"\"Main function.\"\"\"\n args = parse_args()\n # Initialize the thread executor.\n with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:\n ui_cfg = UIConfig(\n height=args.height,\n width=args.width,\n scale=args.scale,\n )\n display_cfg = DisplayConfig(\n with_attr=not args.no_attr,\n with_box2d=args.no_box3d,\n with_box3d=not args.no_box3d,\n with_ctrl_points=not args.no_vertices,\n with_tags=not args.no_tags,\n )\n viewer = LabelViewer(ui_cfg)\n\n ctrl_cfg = ControllerConfig(\n image_dir=args.image_dir,\n label_path=args.labels,\n out_dir=args.output_dir,\n nproc=args.nproc,\n range_begin=args.range_begin,\n range_end=args.range_end,\n )\n controller = ViewController(ctrl_cfg, display_cfg, executor)\n viewer.run_with_controller(controller)\n\n\nif __name__ == \"__main__\":\n main()\n" ]
[ [ "numpy.array" ] ]
PedroRisquez/c19norge-data
[ "ac38460800d5d311877b949c74444b44a0916575" ]
[ "src/utils/graphs.py" ]
[ "import os\nfrom datetime import date\nimport altair as alt\nimport pandas as pd\n\n\ndef tested_lab():\n data = \"data/tested_lab.csv\"\n filename = \"graphs/tested_lab.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n\n mapping = {\n \"new_neg\": \"New (Negative)\",\n \"new_pos\": \"New (Positive)\",\n \"new_total\": \"New\",\n \"pr100_pos\": \"Share Positive\",\n \"total\": \"Cumulative\",\n }\n\n df = df.rename(columns=mapping)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df[\"Share Negative\"] = 100 - df[\"Share Positive\"]\n df = df.melt(\n id_vars=[\"date\", \"Share Positive\"], var_name=\"category\", value_name=\"value\"\n )\n\n base = alt.Chart(\n df,\n title=\"Number of tested persons per specimen collection date and number of positive results (Source: FHI)\",\n ).encode(alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)))\n\n andel = base.mark_line(color=\"red\", opacity=0.8).encode(\n y=alt.Y(\"Share Positive:Q\", title=\"% Positive\", axis=alt.Axis(grid=True))\n )\n\n bar = (\n base.transform_filter(\n (alt.datum.category == \"New (Negative)\")\n | (alt.datum.category == \"New (Positive)\")\n )\n .mark_bar()\n .encode(\n y=alt.Y(\"value:Q\", title=\"Number of persons\"),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\"New (Positive)\", \"New (Negative)\", \"% Positive\"],\n range=[\"#FF9622\", \"#6DA9FF\", \"red\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n chart = (\n alt.layer(bar, andel)\n .resolve_scale(y=\"independent\")\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef confirmed():\n data = \"data/confirmed.csv\"\n filename = \"graphs/confirmed.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df.loc[df[\"source\"] == \"fhi:git\"]\n df[\"new_sma7\"] = df.new.rolling(window=7).mean().shift()\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"new\", \"new_sma7\", \"total\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\"new\": \"New cases\", \"new_sma7\": \"Avg 7 d.\", \"total\": \"Cumulative\"}\n\n df[\"category\"] = df[\"category\"].replace(rename)\n\n base = alt.Chart(\n df,\n title=\"Number of reported COVID-19 cases by specimen collection date (Source: FHI)\",\n ).encode(alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)))\n\n bar = (\n base.transform_filter(alt.datum.category == \"New cases\")\n .mark_bar(color=\"#FFD1D1\")\n .encode(y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"New per day\", grid=True)))\n )\n\n line = (\n base.transform_filter(alt.datum.category == \"Cumulative\")\n .mark_line(color=\"#2E507B\", strokeWidth=3)\n .encode(\n y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"Cumulative\")),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\"New cases\", \"Avg 7 d.\", \"Cumulative\"],\n range=[\"#FFD1D1\", \"red\", \"#2E507B\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n ma7 = (\n base.transform_filter(alt.datum.category == \"Avg 7 d.\")\n .mark_line(opacity=0.8)\n .encode(y=alt.Y(\"value:Q\"), color=alt.Color(\"category:N\"))\n )\n\n chart = (\n alt.layer(bar + ma7, line)\n .resolve_scale(y=\"independent\")\n 
.properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef dead():\n data = \"data/dead.csv\"\n filename = \"graphs/dead.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n\n today = date.today()\n idx = pd.date_range(\"2020-03-07\", df[\"date\"].max())\n df.index = pd.DatetimeIndex(df[\"date\"])\n df = df.reindex(idx)\n df[\"date\"] = df.index\n df = df.reset_index(drop=True)\n df = df[df.date <= str(today)]\n\n df[\"new\"] = df[\"new\"].fillna(0).astype(int)\n df[\"total\"] = df[\"total\"].fillna(method=\"bfill\").astype(int)\n df[\"new_sma7\"] = df.new.rolling(window=7).mean()\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"new\", \"new_sma7\", \"total\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\"new\": \"New\", \"new_sma7\": \"Avg 7 d.\", \"total\": \"Cumulative\"}\n df[\"category\"] = df[\"category\"].replace(rename)\n\n base = alt.Chart(df, title=\"COVID-19 related deaths (Source: FHI)\").encode(\n alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40))\n )\n\n bar = (\n base.transform_filter(alt.datum.category == \"New\")\n .mark_bar(color=\"#FFD1D1\")\n .encode(y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"New per day\", grid=True)))\n )\n\n line = (\n base.transform_filter(alt.datum.category == \"Cumulative\")\n .mark_line(color=\"#2E507B\", strokeWidth=3)\n .encode(\n y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"Cumulative\")),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\"New\", \"Avg 7 d.\", \"Cumulative\"],\n range=[\"#FFD1D1\", \"red\", \"#2E507B\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n ma7 = (\n base.transform_filter(alt.datum.category == \"Avg 7 d.\")\n .mark_line(opacity=0.8)\n .encode(y=alt.Y(\"value:Q\"), color=alt.Color(\"category:N\"))\n )\n\n chart = (\n alt.layer(bar + ma7, line)\n .resolve_scale(y=\"independent\")\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef hospitalized():\n data = \"data/hospitalized.csv\"\n filename = \"graphs/hospitalized.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n\n today = date.today()\n idx = pd.date_range(\"2020-03-08\", today)\n df.index = pd.DatetimeIndex(df[\"date\"])\n df = df.reindex(idx)\n df[\"date\"] = df.index\n df = df.reset_index(drop=True)\n\n df[\"admissions\"] = df[\"admissions\"].fillna(method=\"ffill\").astype(int)\n df[\"respiratory\"] = df[\"respiratory\"].fillna(method=\"ffill\").astype(int)\n\n df_melt = pd.melt(\n df,\n id_vars=[\"date\"],\n value_vars=[\"admissions\", \"respiratory\"],\n value_name=\"value\",\n ).replace({\"admissions\": \"Hospitalized\", \"respiratory\": \"Respirator\"})\n\n chart = (\n alt.Chart(\n df_melt,\n title=\"Number of patients admitted to hospital with COVID-19 (Source: Helsedirektoratet)\",\n )\n .mark_area(line={}, opacity=0.3)\n .encode(\n x=alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)),\n y=alt.Y(\n \"value:Q\",\n stack=None,\n title=\"Number of patients\",\n 
),\n color=alt.Color(\n \"variable:N\",\n scale=alt.Scale(\n domain=[\"Hospitalized\", \"Respirator\"], range=[\"#5A9DFF\", \"#FF8B1B\"]\n ),\n legend=alt.Legend(title=None),\n ),\n )\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef smittestopp():\n data = \"data/smittestopp.csv\"\n filename = \"graphs/smittestopp.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"new_reported\", \"total_downloads\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\n \"new_reported\": \"Number of reported infections\",\n \"total_downloads\": \"Number of downloads\",\n }\n\n df[\"category\"] = df[\"category\"].replace(rename)\n\n base = alt.Chart(\n df,\n title=\"Number of downloads of Smittestopp og number of reported infections through the app (Source: FHI)\",\n ).encode(alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)))\n\n downloads = (\n base.transform_filter(alt.datum.category == \"Number of downloads\")\n .mark_area(line={}, color=\"#5BC1FF\", opacity=0.2)\n .encode(\n y=alt.Y(\n \"value:Q\",\n axis=alt.Axis(title=\"Number of downloads\", grid=True),\n )\n )\n )\n\n reported = (\n base.transform_filter(alt.datum.category == \"Number of reported infections\")\n .mark_bar(color=\"#FFA57E\")\n .encode(\n y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"Number of reported infections\")),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\n \"Number of downloads\",\n \"Number of reported infections\",\n ],\n range=[\"#5BC1FF\", \"#FFA57E\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n chart = (\n alt.layer(reported, downloads)\n .resolve_scale(y=\"independent\")\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n labelLimit=200,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=390,\n legendY=660,\n )\n )\n\n chart.save(filename)\n\n\ndef vaccine_doses():\n data = \"data/vaccine_doses.csv\"\n filename = \"graphs/vaccine_doses.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df[df[\"granularity_geo\"] == \"nation\"]\n df[\"new_sma7\"] = df.new_doses.rolling(window=7).mean().shift()\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"total_dose_1\", \"total_dose_2\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\n \"total_dose_1\": \"Vaccinated with first dose\",\n \"total_dose_2\": \"Fully vaccinated\",\n }\n\n df[\"category\"] = df[\"category\"].replace(rename)\n\n chart = (\n alt.Chart(\n df,\n title=\"Number of people who received their first and second dose of a COVID-19 vaccine in Norway (Source: FHI)\",\n )\n .mark_area(line={}, opacity=0.3)\n .encode(\n x=alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)),\n y=alt.Y(\n \"value:Q\",\n stack=None,\n title=\"Number of people\",\n ),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\n \"Vaccinated with first dose\",\n \"Fully vaccinated\",\n ],\n 
range=[\"#5dade2\", \" #2ecc71\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=660,\n )\n )\n\n chart.save(filename)\n" ]
[ [ "pandas.DatetimeIndex", "pandas.date_range", "pandas.read_csv", "pandas.to_datetime", "pandas.melt" ] ]
jnsrch/disentangling-vae-cwt
[ "0e927bdcd3d149cadb30aa107331f0c071138c41" ]
[ "disvae/training.py" ]
[ "import imageio\nimport logging\nimport os\nfrom timeit import default_timer\nfrom collections import defaultdict\nfrom utils.datasets import DATASETS_DICT\n\nfrom tqdm import trange\nimport torch\nfrom torch.nn import functional as F\n\nfrom disvae.utils.modelIO import save_model\n\n\nTRAIN_LOSSES_LOGFILE = \"train_losses.log\"\n\n\nclass Trainer():\n \"\"\"\n Class to handle training of model.\n\n Parameters\n ----------\n model: disvae.vae.VAE\n\n optimizer: torch.optim.Optimizer\n\n loss_f: disvae.models.BaseLoss\n Loss function.\n\n device: torch.device, optional\n Device on which to run the code.\n\n logger: logging.Logger, optional\n Logger.\n\n save_dir : str, optional\n Directory for saving logs.\n\n gif_visualizer : viz.Visualizer, optional\n Gif Visualizer that should return samples at every epochs.\n\n is_progress_bar: bool, optional\n Whether to use a progress bar for training.\n \"\"\"\n\n def __init__(self, model, optimizer, loss_f,\n device=torch.device(\"cpu\"),\n logger=logging.getLogger(__name__),\n save_dir=\"results\",\n gif_visualizer=None,\n is_progress_bar=True):\n\n self.device = device\n self.model = model.to(self.device)\n self.loss_f = loss_f\n self.optimizer = optimizer\n self.save_dir = save_dir\n self.is_progress_bar = is_progress_bar\n self.logger = logger\n self.losses_logger = LossesLogger(os.path.join(self.save_dir, TRAIN_LOSSES_LOGFILE))\n self.gif_visualizer = gif_visualizer\n self.logger.info(\"Training Device: {}\".format(self.device))\n\n def __call__(self, data_loader,\n epochs=10,\n checkpoint_every=10):\n \"\"\"\n Trains the model.\n\n Parameters\n ----------\n data_loader: torch.utils.data.DataLoader\n\n epochs: int, optional\n Number of epochs to train the model for.\n\n checkpoint_every: int, optional\n Save a checkpoint of the trained model every n epoch.\n \"\"\"\n start = default_timer()\n self.model.train()\n for epoch in range(epochs):\n storer = defaultdict(list)\n mean_epoch_loss = self._train_epoch(data_loader, storer, epoch)\n self.logger.info('Epoch: {} Average loss per image: {:.2f}'.format(epoch + 1,\n mean_epoch_loss))\n self.losses_logger.log(epoch, storer)\n\n if self.gif_visualizer is not None:\n self.gif_visualizer()\n\n if epoch % checkpoint_every == 0:\n save_model(self.model, self.save_dir,\n filename=\"model-{}.pt\".format(epoch))\n\n if self.gif_visualizer is not None:\n self.gif_visualizer.save_reset()\n\n self.model.eval()\n\n delta_time = (default_timer() - start) / 60\n self.logger.info('Finished training after {:.1f} min.'.format(delta_time))\n\n def _train_epoch(self, data_loader, storer, epoch):\n \"\"\"\n Trains the model for one epoch.\n\n Parameters\n ----------\n data_loader: torch.utils.data.DataLoader\n\n storer: dict\n Dictionary in which to store important variables for vizualisation.\n\n epoch: int\n Epoch number\n\n Return\n ------\n mean_epoch_loss: float\n Mean loss per image\n \"\"\"\n epoch_loss = 0.\n kwargs = dict(desc=\"Epoch {}\".format(epoch + 1), leave=False,\n disable=not self.is_progress_bar)\n with trange(len(data_loader), **kwargs) as t:\n for _, data in enumerate(data_loader):\n data = data[0] # Tensors are nested in dataset\n iter_loss = self._train_iteration(data, storer)\n epoch_loss += iter_loss\n\n t.set_postfix(loss=iter_loss)\n t.update()\n\n mean_epoch_loss = epoch_loss / len(data_loader)\n return mean_epoch_loss\n\n def _train_iteration(self, data, storer):\n \"\"\"\n Trains the model for one iteration on a batch of data.\n\n Parameters\n ----------\n data: torch.Tensor\n A batch 
of data. Shape: (batch_size, channel, height, width).\n\n storer: dict\n Dictionary in which to store important variables for visualization.\n \"\"\"\n\n data = data.to(self.device)\n\n try:\n recon_batch, latent_dist, latent_sample = self.model(data)\n loss = self.loss_f(data, recon_batch, latent_dist, self.model.training,\n storer, latent_sample=latent_sample)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n except ValueError:\n # for losses that use multiple optimizers (e.g. Factor)\n loss = self.loss_f.call_optimize(data, self.model, self.optimizer, storer)\n\n return loss.item()\n\n\nclass LossesLogger(object):\n \"\"\"Class definition for objects to write data to log files in a\n form that is easy to plot.\n \"\"\"\n\n def __init__(self, file_path_name):\n \"\"\" Create a logger to store information for plotting. \"\"\"\n if os.path.isfile(file_path_name):\n os.remove(file_path_name)\n\n self.logger = logging.getLogger(\"losses_logger\")\n self.logger.setLevel(1) # always store\n file_handler = logging.FileHandler(file_path_name)\n file_handler.setLevel(1)\n self.logger.addHandler(file_handler)\n\n header = \",\".join([\"Epoch\", \"Loss\", \"Value\"])\n self.logger.debug(header)\n\n def log(self, epoch, losses_storer):\n \"\"\"Write to the log file.\"\"\"\n for k, v in losses_storer.items():\n log_string = \",\".join(str(item) for item in [epoch, k, mean(v)])\n self.logger.debug(log_string)\n\n\n# HELPERS\ndef mean(l):\n \"\"\"Compute the mean of a list\"\"\"\n return sum(l) / len(l)\n" ]
[ [ "torch.device" ] ]
alaaib/NetworkAnalysis
[ "bf45d616b3a2f40cec3879515fe8ecdbe19b0537" ]
[ "ClusterGivenGraph/main.py" ]
[ "import datetime\nimport sys\nimport time\nimport os\n\nfrom ClusterGivenGraph.Graph import Graph\nfrom ClusterGivenGraph.GraphHelper import get_graph_based_degree_sequence, create_motifs, export_to_pajek, \\\n motifs_main_calculation, calc_z_score, export_to_pajek_by_z_score\nimport networkx as nx\nfrom matplotlib import pyplot as plt\nimport statistics\n\nglob_i = 0\nglob_itrs = 0\nglob_req_time = ''\n\n\ndef print_progress_bar(prefix='', suffix='', decimals=1, length=50, fill='█'):\n global glob_i\n glob_i = glob_i + 1\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (glob_i / float(glob_itrs)))\n filled_length = int(length * glob_i // glob_itrs)\n bar = fill * filled_length + '-' * (length - filled_length)\n print('\\r%s |%s| %s%% %s ' % (prefix, bar, percent, suffix), end='\\r')\n # Print New Line on Complete\n if glob_i == glob_itrs:\n print()\n\n\ndef start_here(node_cnt, p_val, edge_cnt, init_scale, tmp_uid):\n # print(\"------------------- Started A New Iteration \" + str(tmp_uid) + \" ---------------------\")\n file_location = './Output/' + str(tmp_uid) + \"/\"\n if not os.path.exists(file_location):\n os.makedirs(file_location)\n print_progress_bar() # print(\"Create Scale-free Graph\")\n\n st_graph_obj = Graph(n=node_cnt, p=p_val, e=edge_cnt, init_scale=init_scale)\n st_graph_obj.create_scale_free_graph()\n st_nx_graph = st_graph_obj.get_graph()\n\n print_progress_bar() # print(\"Create Random Graph\")\n\n nd_graph_obj = Graph(n=node_cnt, p=p_val, e=len(st_nx_graph.edges))\n nd_graph_obj.create_random_graph()\n nd_nx_graph = nd_graph_obj.get_graph()\n\n print_progress_bar() # print(\"Join Both Graph\")\n\n rd_nx_graph = st_graph_obj.compose_graph(nd_nx_graph)\n\n rd_nx_degree_sequence = [d for n, d in rd_nx_graph.degree()]\n\n th_nx_graph = get_graph_based_degree_sequence(rd_nx_degree_sequence, file_location)\n print_progress_bar() # print(\"Write Graphs to files\")\n\n nx.write_pajek(st_nx_graph, file_location + \"random.net\")\n nx.write_pajek(nd_nx_graph, file_location + \"scale-free.net\")\n nx.write_pajek(th_nx_graph,\n file_location + \"randomForMotifs.net\")\n rd_nx_graph_nl_lst = export_to_pajek(rd_nx_graph, st_graph_obj.n)\n\n with open(file_location + 'combinedFixed.net', 'w') as f:\n for item in rd_nx_graph_nl_lst:\n f.write(\"%s\\n\" % item)\n\n print_progress_bar() # print(\"Create Motifs\")\n motifs = create_motifs(init_scale=init_scale, p_val=p_val, file_location=file_location)\n print_progress_bar() # print(\"Calculate Similarity \")\n motifs_result = motifs_main_calculation(motifs, rd_nx_graph, th_nx_graph, file_location)\n print_progress_bar() # print(\"Calculate Z-Score\")\n z_score_result = calc_z_score(motifs_result, tmp_uid, file_location)\n max_z_score = 0\n max_z_score_key = None\n for tmp_k in z_score_result:\n if z_score_result[tmp_k] > max_z_score:\n max_z_score = z_score_result[tmp_k]\n max_z_score_key = tmp_k\n\n rd_nx_graph_nl_lst = export_to_pajek_by_z_score(rd_nx_graph, st_graph_obj.n,\n motifs_result[\"relGraph\"][max_z_score_key],\n file_location)\n sf_good_separate_cnt = 0\n rnd_good_separate_cnt = 0\n sf_bad_separate_cnt = 0\n rnd_bad_separate_cnt = 0\n with open(file_location + 'combinedFixed.net', 'w') as f:\n for item in rd_nx_graph_nl_lst:\n if \"ellipse\" in item:\n if \"Black\" in item:\n sf_good_separate_cnt = sf_good_separate_cnt + 1\n else:\n sf_bad_separate_cnt = sf_bad_separate_cnt + 1\n elif \"box\" in item:\n if \"Black\" in item:\n rnd_bad_separate_cnt = rnd_bad_separate_cnt + 1\n else:\n rnd_good_separate_cnt = 
rnd_good_separate_cnt + 1\n\n f.write(\"%s\\n\" % item)\n\n y_data = [sf_good_separate_cnt / node_cnt,\n sf_bad_separate_cnt / node_cnt,\n rnd_good_separate_cnt / node_cnt,\n rnd_bad_separate_cnt / node_cnt]\n\n x_data = [\"Correct S-F\", \"Incorrect S-F\", \"Correct Rand\", \"Incorrect Rand\"]\n bar_obj = plt.bar(x_data, y_data)\n bar_obj[0].set_color('g')\n bar_obj[1].set_color('r')\n bar_obj[2].set_color('g')\n bar_obj[3].set_color('r')\n for xy_data in zip(x_data, y_data):\n plt.annotate('{:.0%}'.format(xy_data[1]), xy=xy_data, textcoords='data')\n plt.savefig(file_location + \"Bar-\" + str(tmp_uid) + \".png\", dpi=300)\n plt.clf()\n plt.cla()\n # print(\"---------------------------- End Iteration ------------------------\\n\\n\\n\\n\")\n return y_data\n\n\nif __name__ == \"__main__\":\n node_cnt_m = 504\n p_val_m = 0.01\n edge_cnt_m = 1000\n init_scale_m = 10\n args = []\n for arg in sys.argv[1:]:\n args.append(arg)\n if not args:\n print(\"Could not Find argument\")\n exit(1)\n try:\n args[0] = int(args[0])\n except ValueError:\n print(\"Argument must be an integer \")\n print(\"Usage : main.py X (Where X is an integer)\")\n exit(2)\n\n start = time.time()\n\n if not os.path.exists(\"./Output\"):\n os.makedirs(\"./Output\")\n results = dict()\n run_rng = range(args[0])\n\n glob_itrs = len(run_rng) * 7 + 1\n glob_req_time = str(datetime.timedelta(seconds=20 * (len(run_rng))))\n print(\"Estimated Time To Complete - \" + glob_req_time)\n for i in run_rng:\n tmp = start_here(node_cnt_m, p_val_m, edge_cnt_m, init_scale_m, i + 1)\n results[i + 1] = tmp\n\n sf_good_separate_cnt_m = []\n sf_bad_separate_cnt_m = []\n rnd_good_separate_cnt_m = []\n rnd_bad_separate_cnt_m = []\n\n for k in results:\n sf_good_separate_cnt_m.append(results[k][0])\n sf_bad_separate_cnt_m.append(results[k][1])\n rnd_good_separate_cnt_m.append(results[k][2])\n rnd_bad_separate_cnt_m.append(results[k][3])\n\n y = [statistics.mean(sf_good_separate_cnt_m),\n statistics.mean(sf_bad_separate_cnt_m),\n statistics.mean(rnd_good_separate_cnt_m),\n statistics.mean(rnd_bad_separate_cnt_m)]\n x = [\"Correct S-F\", \"Incorrect S-F\", \"Correct Rand\", \"Incorrect Rand\"]\n\n barObj_m = plt.bar(x, y)\n barObj_m[0].set_color('g')\n barObj_m[1].set_color('r')\n barObj_m[2].set_color('g')\n barObj_m[3].set_color('r')\n for xy in zip(x, y):\n plt.annotate('{:.0%}'.format(xy[1]), xy=xy, textcoords='data')\n plt.savefig(\"./Output/Bar.png\", dpi=300)\n print_progress_bar()\n end = time.time()\n print(\"---------------- executing time --------------\")\n print(end - start)\n print(\"---------------- executing time --------------\")\n" ]
[ [ "matplotlib.pyplot.savefig", "matplotlib.pyplot.cla", "matplotlib.pyplot.clf", "matplotlib.pyplot.bar" ] ]
lidq92/MDTVSFA
[ "22f49a9c1b2faec4a643c92b0f6b69297f4e4121" ]
[ "VQAloss.py" ]
[ "import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass VQALoss(nn.Module):\n def __init__(self, scale, loss_type='mixed', m=None):\n super(VQALoss, self).__init__()\n self.loss_type = loss_type\n self.scale = scale\n self.m = m #\n\n def forward(self, y_pred, y):\n relative_score, mapped_score, aligned_score = y_pred\n if self.loss_type == 'mixed':\n loss = [loss_a(mapped_score[d], y[d]) + loss_m(relative_score[d], y[d]) +\n F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]\n elif self.loss_type == 'correlation' or self.loss_type == 'rank+plcc':\n loss = [loss_a(mapped_score[d], y[d]) + loss_m(relative_score[d], y[d]) for d in range(len(y))]\n elif self.loss_type == 'rank':\n loss = [loss_m(relative_score[d], y[d]) for d in range(len(y))]\n elif self.loss_type == 'plcc':\n loss = [loss_a(mapped_score[d], y[d]) for d in range(len(y))]\n elif self.loss_type == 'rank+l1':\n loss = [loss_m(relative_score[d], y[d]) + F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y)) for d in range(len(y))]\n elif self.loss_type == 'plcc+l1':\n loss = [loss_a(relative_score[d], y[d]) + F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y)) for d in range(len(y))]\n elif 'naive' in self.loss_type:\n aligned_scores = torch.cat([(aligned_score[d]-self.m[d])/self.scale[d] for d in range(len(y))])\n ys = torch.cat([(y[d]-self.m[d])/self.scale[d] for d in range(len(y))])\n if self.loss_type == 'naive0':\n return F.l1_loss(aligned_scores, ys) # \n return loss_a(aligned_scores, ys) + loss_m(aligned_scores, ys) + F.l1_loss(aligned_scores, ys)\n else: # default l1\n loss = [F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]\n # print(loss)\n # sum_loss = sum([lossi for lossi in loss]) / len(loss)\n # sum_loss = len(loss) / sum([1 / lossi for lossi in loss])\n sum_loss = sum([torch.exp(lossi) * lossi for lossi in loss]) / sum([torch.exp(lossi) for lossi in loss])\n return sum_loss\n\n\ndef loss_m(y_pred, y):\n \"\"\"prediction monotonicity related loss\"\"\"\n assert y_pred.size(0) > 1 #\n return torch.sum(F.relu((y_pred-y_pred.t()) * torch.sign((y.t()-y)))) / y_pred.size(0) / (y_pred.size(0)-1)\n\n\ndef loss_a(y_pred, y):\n \"\"\"prediction accuracy related loss\"\"\"\n assert y_pred.size(0) > 1 #\n return (1 - torch.cosine_similarity(y_pred.t() - torch.mean(y_pred), y.t() - torch.mean(y))[0]) / 2\n\n" ]
[ [ "torch.mean", "torch.nn.functional.l1_loss", "torch.exp" ] ]
7FM/OpenRadar
[ "d90eea23feb062830dd71b00064f06f70ba6783c" ]
[ "mmwave/dsp/doppler_processing.py" ]
[ "# Copyright 2019 The OpenRadar Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nfrom numba import njit, jit\nfrom . import compensation\nfrom . import utils\n\n\ndef doppler_resolution(band_width, start_freq_const=77, ramp_end_time=62, idle_time_const=100, num_loops_per_frame=128,\n num_tx_antennas=3):\n \"\"\"Calculate the doppler resolution for the given radar configuration.\n\n Args:\n start_freq_const (float): Frequency chirp starting point.\n ramp_end_time (float): Frequency chirp end point.\n idle_time_const (int): Idle time between chirps.\n band_width (float): Radar config bandwidth.\n num_loops_per_frame (int): The number of loops in each frame.\n num_tx_antennas (int): The number of transmitting antennas (tx) on the radar.\n\n Returns:\n doppler_resolution (float): The doppler resolution for the given radar configuration.\n\n \"\"\"\n\n light_speed_meter_per_sec = 299792458\n\n center_frequency = start_freq_const * 1e9 + band_width / 2\n chirp_interval = (ramp_end_time + idle_time_const) * 1e-6\n doppler_resolution = light_speed_meter_per_sec / (\n 2 * num_loops_per_frame * num_tx_antennas * center_frequency * chirp_interval)\n\n return doppler_resolution\n\ndef separate_tx(signal, num_tx, vx_axis=1, axis=0):\n \"\"\"Separate interleaved radar data from separate TX along a certain axis to account for TDM radars.\n\n Args:\n signal (ndarray): Received signal.\n num_tx (int): Number of transmit antennas.\n vx_axis (int): Axis in which to accumulate the separated data.\n axis (int): Axis in which the data is interleaved.\n\n Returns:\n ndarray: Separated received data in the\n\n \"\"\"\n # Reorder the axes\n reordering = np.arange(len(signal.shape))\n reordering[0] = axis\n reordering[axis] = 0\n if not (reordering == np.arange(len(reordering))).all(): # check if has to reorder\n signal = signal.transpose(reordering)\n\n # if signal.shape[1] != num_tx * signal.shape[1]:\n # pass\n\n out = np.concatenate([signal[i::num_tx, ...] for i in range(num_tx)], axis=vx_axis)\n\n return out.transpose(reordering)\n\n\ndef doppler_processing(radar_cube,\n num_tx_antennas=2,\n clutter_removal_enabled=False,\n interleaved=True,\n window_type_2d=None,\n accumulate=True):\n \"\"\"Perform 2D FFT on the radar_cube.\n\n Interleave the radar_cube, perform optional windowing and 2D FFT on the radar_cube. Optional antenna couping\n signature removal can also be performed right before 2D FFT. In constrast to the original TI codes, CFAR and peak\n grouping are intentionally separated with 2D FFT for the easiness of debugging.\n\n Args:\n radar_cube (ndarray): Output of the 1D FFT. If not interleaved beforehand, it has the shape of\n (numChirpsPerFrame, numRxAntennas, numRangeBins). Otherwise, it has the shape of \n (numRangeBins, numVirtualAntennas, num_doppler_bins). 
It is assumed that after interleaving the doppler\n dimension is located at the last axis.\n num_tx_antennas (int): Number of transmitter antennas. This affects how interleaving is performed.\n clutter_removal_enabled (boolean): Flag to enable naive clutter removal.\n interleaved (boolean): If the input radar_cube is interleaved before passing in. The default radar_cube is not\n interleaved, i.e. has the shape of (numChirpsPerFrame, numRxAntennas, numRangeBins). The interleaving\n process will transform it such that it becomes (numRangeBins, numVirtualAntennas, num_doppler_bins). Note\n that this interleaving is only applicable to TDM radar, i.e. each tx emits the chirp sequentially.\n window_type_2d (mmwave.dsp.utils.Window): Optional windowing type before doppler FFT.\n accumulate (boolean): Flag to reduce the numVirtualAntennas dimension.\n \n Returns:\n detMatrix (ndarray): (numRangeBins, num_doppler_bins) complete range-doppler information. Original datatype is\n uint16_t. Note that azimuthStaticHeatMap can be extracted from zero-doppler index for\n visualization.\n aoa_input (ndarray): (numRangeBins, numVirtualAntennas, num_doppler_bins) ADC data reorganized by vrx instead of\n physical rx.\n \"\"\"\n\n if interleaved:\n # radar_cube is interleaved in the first dimension (for 2 tx and 0-based indexing, odd are the chirps from tx1,\n # and even are from tx2) so it becomes (num_doppler_bins, numVirtualAntennas, numADCSamples), where \n # numChirpsPerFrame = num_doppler_bins * num_tx_antennas as designed.\n # Antennas associated to tx1 (Ping) are 0:4 and to tx2 (Pong) are 5:8.\n fft2d_in = separate_tx(radar_cube, num_tx_antennas, vx_axis=1, axis=0)\n else:\n fft2d_in = radar_cube\n \n # (Optional) Static Clutter Removal\n if clutter_removal_enabled:\n # fft2d_in = compensation.clutter_removal(fft2d_in, axis=0)\n fft2d_in[1:] = compensation.clutter_removal(fft2d_in[1:], axis=0) # TODO this or above with static detection removal\n\n # transpose to (numRangeBins, numVirtualAntennas, num_doppler_bins)\n fft2d_in = np.transpose(fft2d_in, axes=(2, 1, 0))\n\n # Windowing 16x32\n if window_type_2d:\n fft2d_in = utils.windowing(fft2d_in, window_type_2d, axis=2)\n\n # It is assumed that doppler is at the last axis.\n # FFT 32x32\n fft2d_out = np.fft.fft(fft2d_in)\n aoa_input = fft2d_out\n\n # Save zero-Doppler as azimuthStaticHeatMap, watch out for the bit shift in\n # original code.\n\n # Log_2 Absolute Value\n fft2d_log_abs = np.log2(np.abs(fft2d_out))\n\n # Accumulate\n if accumulate:\n return np.sum(fft2d_log_abs, axis=1), aoa_input # TODO divide by num_rx?\n else:\n return fft2d_log_abs, aoa_input\n\n\ndef doppler_estimation(radar_cube,\n beam_weights,\n num_tx_antennas=2,\n clutter_removal_enabled=False,\n interleaved=False,\n window_type_2d=None):\n \"\"\"Perform doppler estimation on the weighted sum of range FFT output across all virtual antennas.\n \n In contrast to directly computing doppler FFT from the output of range FFT, this function combines it across all \n the virtual receivers first using the weights generated from beamforming. Then FFT is performed and argmax is taken\n across each doppler axis to return the indices of max doppler values.\n \n Args:\n radar_cube (ndarray): Output of the 1D FFT with only ranges on detected objects. If not interleaved beforehand,\n it has the shape of (numChirpsPerFrame, numRxAntennas, numDetObjs). Otherwise, it has the shape of \n (numDetObjs, numVirtualAntennas, num_doppler_bins). 
It is assumed that after interleaving the doppler\n dimension is located at the last axis.\n beam_weights (ndarray): Weights to sum up the radar_cube across the virtual receivers. It is from the\n beam-forming and has the shape of (numVirtualAntennas, numDetObjs)\n num_tx_antennas (int): Number of transmitter antennas. This affects how interleaving is performed.\n clutter_removal_enabled (boolean): Flag to enable naive clutter removal.\n interleaved (boolean): If the input radar_cube is interleaved before passing in. The default radar_cube is not\n interleaved, i.e. has the shape of (numChirpsPerFrame, numRxAntennas, numDetObjs). The interleaving process\n will transform it such that it becomes (numDetObjs, numVirtualAntennas, num_doppler_bins). Note that this\n interleaving is only applicable to TDM radar, i.e. each tx emits the chirp sequentially.\n window_type_2d (string): Optional windowing type before doppler FFT.\n \n Returns:\n doppler_est (ndarray): (numDetObjs) Doppler index for each detected object. Positive index means moving away\n from radar while negative index means moving towards the radar.\n \"\"\"\n fft2d_in = None\n if not interleaved:\n num_doppler_bins = radar_cube.shape[0] // num_tx_antennas\n # radar_cube is interleaved in the first dimension (for 2 tx and 0-based indexing, odd are the chirps from tx1,\n # and even are from tx2) so it becomes (num_doppler_bins, numVirtualAntennas, numADCSamples), where\n # numChirpsPerFrame = num_doppler_bins * num_tx_antennas as designed.\n # Antennas associated to tx1 (Ping) are 0:4 and to tx2 (Pong) are 5:8.\n if num_tx_antennas == 2:\n fft2d_in = np.concatenate((radar_cube[0::2, ...], radar_cube[1::2, ...]), axis=1)\n elif num_tx_antennas == 3:\n fft2d_in = np.concatenate((radar_cube[0::3, ...], radar_cube[1::3, ...], radar_cube[2::3, ...]), axis=1)\n\n # transpose to (numRangeBins, numVirtualAntennas, num_doppler_bins)\n fft2d_in = np.transpose(fft2d_in, axes=(2, 1, 0))\n else:\n num_doppler_bins = radar_cube.shape[2]\n fft2d_in = radar_cube # input is already interleaved\n\n # (Optional) Static Clutter Removal\n if clutter_removal_enabled:\n fft2d_in = compensation.clutter_removal(fft2d_in)\n\n # Weighted sum across all virtual receivers.\n fft2d_in = np.einsum('ijk,jk->ik', fft2d_in, beam_weights)\n\n # Windowing 16x32\n if window_type_2d:\n fft2d_in = utils.windowing(fft2d_in, window_type_2d, axis=1)\n\n # It is assumed that doppler is at the last axis.\n # FFT 32x32\n doppler_est = np.fft.fft(fft2d_in)\n doppler_est = np.argmax(doppler_est, axis=1)\n doppler_est[doppler_est[:] >= num_doppler_bins] -= num_doppler_bins * 2\n\n return doppler_est\n" ]
[ [ "numpy.sum", "numpy.fft.fft", "numpy.transpose", "numpy.abs", "numpy.argmax", "numpy.einsum", "numpy.concatenate" ] ]
amckenna41/CDBLSTM_PSP
[ "d4e5d874af65c1264c3a459ecad19e71610d1f82" ]
[ "psp/main_gcp.py" ]
[ "################################################################################\n##### Entry script for psp_gcp dir for training on Google Cloud Platform #####\n################################################################################\n\n#import required modules and dependancies\nimport tensorflow as tf\nimport argparse\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Bidirectional, LSTM, Input, Conv1D, \\\n Embedding, Dense, Dropout, Activation, Concatenate, Reshape,MaxPooling1D, Convolution1D,BatchNormalization\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.callbacks import EarlyStopping ,ModelCheckpoint, TensorBoard, \\\n ReduceLROnPlateau, LearningRateScheduler, CSVLogger\nfrom tensorflow.keras.metrics import AUC, MeanSquaredError, FalseNegatives, FalsePositives, \\\n MeanAbsoluteError, TruePositives, TrueNegatives, Precision, Recall\nfrom tensorflow.keras import activations\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.compat.v1.keras.backend import set_session\nimport os\nfrom os.path import isfile, join\nfrom os import listdir\nimport sys\nimport time\nimport importlib\nimport pkgutil\nimport json\nfrom google.cloud import storage\nfrom json.decoder import JSONDecodeError\nfrom psp.load_dataset import *\nfrom psp.plot_model import *\nfrom psp.gcp_utils import *\nfrom psp._globals import *\nfrom psp.evaluate import *\nfrom psp.models import *\nfrom psp.models.auxiliary_models import *\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=r\"Passing\", category=FutureWarning)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #reduce TF log output to only include Errors\n\n### Tensorboard parameters and configuration ###\ntf.compat.v1.reset_default_graph()\ntf.keras.backend.clear_session() # For easy reset of notebook state.\n# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\nconfig_proto = tf.compat.v1.ConfigProto()\ntf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)\nconfig_proto.allow_soft_placement = True\noff = rewriter_config_pb2.RewriterConfig.OFF\nconfig_proto.gpu_options.allow_growth = True\nconfig_proto.graph_options.rewrite_options.arithmetic_optimization = off\n#set tensorflow GPUOptions so TF doesn't overload GPU if present\n# config_proto.gpu_options(per_process_gpu_memory_fraction=0.333)\nsession = tf.compat.v1.Session(config=config_proto)\n\n# tf.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))\nset_session(session)\n\n#get model filenames from models and auxillary models directory\nall_models = [name for _, name, _ in pkgutil.iter_modules([os.path.join('psp','models')])]\nall_models = all_models + [name for _, name, _ in pkgutil.iter_modules([os.path.join('psp','models','auxiliary_models')])]\n\n#main function to train and evaluate CNN + RNN + DNN model\ndef main(args):\n \"\"\"\n Description:\n Main function for training, evaluating and plotting PSP models via GCP.\n Args:\n :args (dict): parsed input arguments.\n Returns:\n None\n \"\"\"\n #load json from config input parameters\n params = json.loads(args.params)\n gcp_params = json.loads(args.gcp_params)\n model_params = json.loads(args.model_params)\n\n #get input arguments\n config = args.config\n local = args.local\n job_dir = args.job_dir\n package_path = gcp_params[\"package_path\"]\n bucket = gcp_params[\"bucket\"]\n training_data = params[\"training_data\"]\n filtered = params[\"filtered\"]\n batch_size = 
int(params[\"batch_size\"])\n epochs = int(params[\"epochs\"])\n logs_path = str(params[\"logs_path\"])\n cuda = params[\"cuda\"]\n tpu = gcp_params[\"tpu\"]\n test_dataset = str(params[\"test_dataset\"])\n model_ = str(params[\"model\"])\n tf_version = tf.__version__\n lr_scheduler = str(model_params[\"lr_scheduler\"])\n callbacks = (model_params[\"callbacks\"])\n\n #if using TPU, initalise TensorFlow TPU Strategy\n if (tpu):\n tpu_strategy = setup_tpu()\n\n #initialise global GCP bucket variable\n initialise_bucket(bucket)\n\n #create data dir to store all training and test datasets\n if not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n #create output dir to store model training output\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n #create folder where all model assets and artifacts will be stored after training\n model_output_folder = os.path.join(os.path.join(OUTPUT_DIR, model_ + '_'+ current_datetime))\n os.makedirs(model_output_folder)\n\n #create logs path directory where TensorBoard logs will be stored\n if not os.path.exists(os.path.join(model_output_folder, logs_path)):\n os.makedirs(os.path.join(model_output_folder, logs_path))\n\n #create checkpoints dir where model checkpoints will be saved\n if not os.path.exists(os.path.join(model_output_folder, 'checkpoints')):\n os.makedirs(os.path.join(model_output_folder, 'checkpoints'))\n\n #append parameters to model output results file\n model_output[\"Output Folder\"] = model_output_folder\n model_output[\"Config\"] = os.path.basename(config)\n model_output[\"Model\"] = model_\n model_output[\"Bucket\"] = bucket\n model_output[\"Training Dataset Type\"] = training_data\n model_output[\"Filtered?\"] = filtered\n model_output[\"Test Dataset\"] = test_dataset\n model_output[\"Number of epochs\"] = epochs\n model_output[\"Batch size\"] = batch_size\n model_output[\"Tensorflow Version\"] = tf_version\n model_output[\"TensorBoard logs dir\"] = os.path.join(model_output_folder, logs_path)\n model_output[\"Cuda\"] = cuda\n model_output[\"TPU\"] = tpu\n model_output[\"LR Scheduler\"] = lr_scheduler\n\n #load training dataset\n cullpdb = CullPDB(type=training_data, filtered=filtered)\n\n all_models.append(model_)\n\n #verify model specified in config parameter is an available trainable model\n if model_ not in all_models:\n raise ValueError('Model must be in available models.')\n\n #import model module from models or auxillary models folder\n if (model_!=\"psp_dcblstm_model\" and model_!=\"psp_dculstm_model\" and model_!=\"dummy_model\"):\n mod = importlib.import_module(package_path + \".models.auxiliary_models.\"+model_)\n else:\n mod = importlib.import_module(package_path + \".models.\"+model_)\n\n #build imported model with parameters from config\n model = mod.build_model(model_params)\n\n all_callbacks = []\n\n #initialise Tensorflow callbacks, append each callback if used\n if (callbacks[\"tensorboard\"]):\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir=(os.path.join(model_output_folder,\n logs_path)), histogram_freq=0, write_graph=True, write_images=True)\n all_callbacks.append(tensorboard)\n if (callbacks[\"earlyStopping\"]):\n earlyStopping = EarlyStopping(monitor='loss', patience=5, verbose=1, mode='min')\n all_callbacks.append(earlyStopping)\n if (callbacks[\"modelCheckpoint\"]):\n checkpoint = ModelCheckpoint(filepath=os.path.join(model_output_folder, 'checkpoints','model_' + current_datetime + '.h5'), \\\n verbose=1,save_best_only=True, monitor='loss', mode='min')\n 
all_callbacks.append(checkpoint)\n if (callbacks[\"csv_logger\"]):\n csv_logger = CSVLogger(os.path.join(model_output_folder, 'training.log'))\n all_callbacks.append(csv_logger)\n if (callbacks[\"reduceLROnPlateau\"]):\n reduceLROnPlateau = ReduceLROnPlateau(monitor=\"loss\", factor=0.1, patience=10, verbose=1, mode=\"min\")\n all_callbacks.append(reduceLROnPlateau)\n\n #get LR Scheduler callback to use from parameter in config file\n #remove any whitespace or '-' from lr_schedule name\n lr_scheduler = lr_scheduler.lower().strip().replace(\" \", \"\").replace(\"-\",\"\")\n if (lr_scheduler == \"exponentialdecay\" or lr_scheduler == \"exponential\"):\n exponentialDecay = ExponentialDecay()\n lr_schedule = LearningRateScheduler(exponentialDecay)\n all_callbacks.append(lr_schedule)\n elif (lr_scheduler == \"timebaseddecay\" or lr_scheduler == \"timebased\"):\n timeBasedDecay = TimedBased()\n lr_schedule = LearningRateScheduler(timeBasedDecay)\n all_callbacks.append(lr_schedule)\n elif (lr_scheduler == \"stepdecay\" or lr_scheduler == \"step\"):\n stepDecay = StepDecay()\n lr_schedule = LearningRateScheduler(stepDecay)\n all_callbacks.append(lr_schedule)\n\n #start counter\n start = time.time()\n\n #fit model\n if cuda:\n with tf.device('/gpu:0'): #if training on GPU\n print('Fitting model...')\n history = model.fit({'main_input': cullpdb.train_hot, 'aux_input': cullpdb.trainpssm},\n {'main_output': cullpdb.trainlabel},validation_data=({'main_input': cullpdb.val_hot, 'aux_input': cullpdb.valpssm},\n {'main_output': cullpdb.vallabel}), epochs=epochs, batch_size=batch_size, verbose=2,\n callbacks=all_callbacks,shuffle=True)\n else: #training on CPU (default) or TPU\n print('Fitting model...')\n history = model.fit({'main_input': cullpdb.train_hot, 'aux_input': cullpdb.trainpssm},\n {'main_output': cullpdb.trainlabel},validation_data=({'main_input': cullpdb.val_hot, 'aux_input': cullpdb.valpssm},\n {'main_output': cullpdb.vallabel}), epochs=epochs, batch_size=batch_size, verbose=2,\n callbacks=all_callbacks,shuffle=True)\n\n #stop counter, calculate elapsed time\n elapsed = (time.time() - start)\n print('Elapsed Training Time: {}'.format(elapsed))\n model_output[\"Training Time\"] = elapsed\n\n #save model locally in saved models dir - create dir in this dir to store all model related objects\n print('Model saved in {} folder as {} '.format(\n os.path.dirname(model_output_folder), os.path.basename(os.path.join(model_output_folder, 'model.h5'))))\n model.save(os.path.join(model_output_folder, 'model.h5'))\n\n #save model history pickle\n history_filepath = os.path.join(model_output_folder, 'history.pckl')\n save_history(history, history_filepath)\n\n #plot model history and all metric plots\n plot_history(history.history, model_output_folder, show_histograms = False,\n show_boxplots = True, show_kde = True, filter_outliers = True)\n\n #evaluating model on test datasets\n evaluate_cullpdb(model,cullpdb)\n evaluate_model(model, test_dataset=test_dataset)\n\n #visualise Keras model and all its layers, store in png\n #Need to manually install graphviz (https://graphviz.gitlab.io/download/) etc...\n if (local==\"1\"):\n visualise_model(model, model_output_folder)\n\n #save model architecture\n with open(os.path.join(model_output_folder, \"model_architecture.json\"), \"w\") as model_arch:\n model_arch.write(model.to_json(indent=3))\n\n #getting output results from model into csv\n model_output_df = get_model_output(model_output_folder)\n\n #upload configuration json to storage bucket\n #local 
flag used as config file upload doesn't seem to work when training on GCP, only locally\n if (local==\"1\"):\n upload_file(os.path.join(model_output_folder,os.path.basename(config)),config)\n\n # upload model output folder and all training results and assets\n upload_directory(model_output_folder, model_output_folder)\n\n print('Model training files exported to bucket path: {}/{} '.format(bucket, model_output_folder))\n\n #append training results of current job to all results file\n append_all_output(model_output_df)\n\n #close tensorflow session\n session.close()\n\nif __name__ == \"__main__\":\n\n #############################################################\n ### PSP Input Arguments ###\n #############################################################\n\n parser = argparse.ArgumentParser(description='Protein Secondary Structure Prediction')\n\n parser.add_argument('-local', '--local', required=True,\n help='Flag to determine if job being run locally or on GCP.')\n parser.add_argument('-job-dir', '--job-dir', type=str, required=True,\n help='Directory where logs from training job are stored.')\n parser.add_argument('-config', '--config', type=str, required=True,\n help='File path to config json file.')\n parser.add_argument('-params', '--params', type=str, required=True,\n help='General training parameters')\n parser.add_argument('-gcp_params', '--gcp_params', type=str, required=True,\n help='GCP job parameters')\n parser.add_argument('-model_params', '--model_params', type=str, required=True,\n help='ML model parameters')\n\n #parse input arguments\n args = parser.parse_args()\n\n main(args)\n" ]
[ [ "tensorflow.compat.v1.reset_default_graph", "tensorflow.keras.callbacks.LearningRateScheduler", "tensorflow.compat.v1.Session", "tensorflow.compat.v1.GPUOptions", "tensorflow.compat.v1.ConfigProto", "tensorflow.device", "tensorflow.keras.backend.clear_session", "tensorflow.compat.v1.keras.backend.set_session", "tensorflow.keras.callbacks.EarlyStopping", "tensorflow.keras.callbacks.ReduceLROnPlateau" ] ]
htcr/deeplab-pytorch
[ "8cea35415112fefb6a886d0d98ab64350ed09601" ]
[ "api.py" ]
[ "import numpy as np\nimport cv2\n\nimport os\nimport os.path as osp\n\nimport torch\nimport yaml\nfrom addict import Dict\nimport matplotlib.pyplot as plt\n\nfrom .libs.models import *\nfrom .libs.utils import DenseCRF\n\nfrom demo import preprocessing, inference\n\nclass DeepLabV2Masker(object):\n def __init__(self, crf=True):\n cur_dir = osp.dirname(osp.realpath(__file__))\n \n config_path = osp.join(\n cur_dir,\n 'configs/human.yaml'\n )\n model_path = osp.join(\n cur_dir,\n 'data/models/human/deeplabv2_resnet101_msc/all_human/checkpoint_final.pth'\n )\n \n device = torch.device('cuda')\n CONFIG = Dict(yaml.load(open(config_path, 'r')))\n\n torch.set_grad_enabled(False)\n # CRF post-processor\n self.crf = crf\n if crf:\n self.postprocessor = DenseCRF(\n iter_max=CONFIG.CRF.ITER_MAX,\n pos_xy_std=CONFIG.CRF.POS_XY_STD,\n pos_w=CONFIG.CRF.POS_W,\n bi_xy_std=CONFIG.CRF.BI_XY_STD,\n bi_rgb_std=CONFIG.CRF.BI_RGB_STD,\n bi_w=CONFIG.CRF.BI_W,\n )\n else:\n self.postprocessor = None\n \n self.model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)\n state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)\n self.model.load_state_dict(state_dict)\n self.model.eval()\n self.model.to(device)\n print(\"Model:\", CONFIG.MODEL.NAME)\n\n self.CONFIG = CONFIG\n self.device = device\n \n\n def get_mask(self, image, bk):\n ori_h, ori_w = image.shape[:2]\n image, raw_image = preprocessing(image, self.device, self.CONFIG)\n \n bk = cv2.resize(bk, raw_image.shape[:2][::-1])\n \n diff = np.maximum(raw_image, bk).astype(np.float32) / (np.minimum(raw_image, bk).astype(np.float32) + 0.1)\n \n diff = (diff - np.min(diff)) / (np.max(diff) - np.min(diff)) * 255\n\n diff = diff.astype(np.uint8)\n\n raw_image = diff\n\n #plt.imshow(raw_image)\n #plt.show() \n\n labelmap = inference(self.model, image, raw_image, self.postprocessor)\n mask = labelmap == 1\n mask = mask.astype(np.uint8) * 255\n mask = cv2.resize(mask, (ori_w, ori_h))\n mask = np.where(mask > 128, 255, 0).astype(np.uint8)\n return mask" ]
[ [ "torch.load", "torch.set_grad_enabled", "numpy.max", "numpy.min", "numpy.maximum", "numpy.where", "torch.device", "numpy.minimum" ] ]
alicechi2/LargeScaleCoverSongId
[ "d33a8425ce8761f09537d657d29c0e4b87e05249" ]
[ "binary_task.py" ]
[ "#!/usr/bin/env python\n\"\"\"\nBinary task of cover song identification using the Millions Song Dataset \nand the Second Hand Song dataset.\n\nIt takes the Million Song Dataset path as an argument. \n\nThe list of queries to test must be located in:\n./SHS/list_500queries.txt\n\nThe training set of the Second Hand Song dataset must be located in:\n./SHS/shs_dataset_train.txt\n\nPlease, read the README.md file for more info on how to run this code.\n\nReferences:\nBertin-Mahieux, T., & Ellis, D. P. W. (2012). Large-Scale Cover Song \nRecognition Using The 2D Fourier Transform Magnitude. In Proc. of the 13th \nInternational Society for Music Information Retrieval Conference (pp. 241-246).\nPorto, Portugal.\n\nHumphrey, E. J., Nieto, O., & Bello, J. P. (2013). Data Driven and \nDiscriminative Projections for Large-Scale Cover Song Identification. \nIn Proc. of the 14th International Society for Music Information Retrieval \nConference. Curitiba, Brazil.\n\nCreated by Thierry Bertin-Mahieux ([email protected])\nModified by Uri Nieto ([email protected])\n\n----\nThis code is distributed under the GNU LESSER PUBLIC LICENSE \n(LGPL, see www.gnu.org).\n\nCopyright (c) 2012-2013 MARL@NYU.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n a. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n b. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n c. Neither the name of MARL, NYU nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n\"\"\"\n\nimport argparse\nimport cPickle\nimport numpy as np\nimport os\nimport sys\nimport time\n\n# local stuff\nimport pca\nimport hdf5_getters as GETTERS\nimport dan_tools\nimport utils\nfrom transforms import load_transform\n\n# Thierry's original parameters for ISMIR paper\nWIN = 75\nPWR = 1.96\nPATCH_LEN = WIN*12\n\n# Set up logger\nlogger = utils.configure_logger()\n\ndef extract_feats(filename, td=None, lda_file=None, lda_n=0, ver=True):\n \"\"\"Computes the features using the dictionary transformation td. 
\n If it doesn't exist, computes them using Thierry's method.\n\n The improved pipeline is composed of 11 steps:\n\n 1.- Beat Synchronous Chroma\n 2.- L2-Norm\n 3.- Shingle (PATCH_LEN: 75 x 12)\n 4.- 2D-FFT\n 5.- L2-Norm\n 6.- Log-Scale\n 7.- Sparse Coding\n 8.- Shrinkage\n 9.- Median Aggregation\n 10.- Dimensionality Reduction\n 11.- L2-Norm\n\n Original method by Thierry doesn't include steps 5,6,7,8,11.\n \"\"\"\n # 1.- Beat Synchronous Chroma\n # 2.- L2-Norm\n # 3.- Shingle (PATCH_LEN: 75 x 12)\n # 4.- 2D-FFT\n feats = utils.extract_feats(filename)\n if feats is None:\n return None\n\n if td is not None:\n # 5.- L2-Norm\n # 6.- Log-Scale\n # 7.- Sparse Coding\n # 8.- Shrinkage\n H = td(feats)\n else:\n H = feats\n\n #. 9.- Median Aggregation\n H = np.median(H, axis=0)\n\n # Apply LDA if needed\n if lda_file is not None:\n # 10.- Dimensionality Reduction\n H = lda_file[lda_n].transform(H)\n\n # 11.- L2-Norm\n feats = dan_tools.chromnorm(H.reshape(H.shape[0], 1)).squeeze()\n\n return feats\n\n\ndef read_query_file(queriesf):\n \"\"\"Read queries, return triplets (query/good/bad).\"\"\"\n queries = []\n triplet = []\n f = open(queriesf, 'r')\n for line in f.xreadlines():\n if line == '' or line.strip() == '':\n continue\n if line[0] == '#':\n continue\n if line[0] == '%':\n assert len(triplet) == 0 or len(triplet) == 3\n if len(triplet) > 0:\n queries.append(triplet)\n triplet = []\n continue\n tid = line.strip()\n assert len(tid) == 18 and tid[:2] == 'TR'\n triplet.append(tid)\n assert len(triplet) == 3\n queries.append(triplet)\n f.close()\n logger.info('Found %d queries from file %s' % (len(queries), queriesf))\n return queries\n\n\ndef main():\n # Args parser\n parser = argparse.ArgumentParser(description=\n \"Evaluates the 500 binary queries from the SHS data set\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"msd_dir\", action=\"store\",\n help=\"Million Song Dataset main directory\")\n parser.add_argument(\"-dictfile\", action=\"store\", default=\"\",\n help=\"Pickle to the learned dictionary\")\n parser.add_argument(\"-lda\", action=\"store\", nargs=2, default=[None,0], \n help=\"LDA file and version\", metavar=('lda.pkl', 'n'))\n parser.add_argument(\"-pca\", nargs=2, metavar=('f.pkl', 'n'), \n default=(\"\", 0),\n help=\"pca model saved in a pickle file, \" \\\n \"use n dimensions\")\n # Parse\n args = parser.parse_args()\n\n # Track time\n start_time = time.time()\n\n maindir = args.msd_dir\n queriesf = \"SHS/list_500queries.txt\"\n shsf = \"SHS/shs_dataset_train.txt\"\n lda = args.lda[0]\n lda_n = int(args.lda[1])\n pcafile = args.pca[0]\n pcadim = int(args.pca[1])\n\n # sanity cheks\n utils.assert_file(maindir)\n utils.assert_file(queriesf)\n utils.assert_file(shsf)\n utils.assert_file(pcafile)\n\n # read queries\n queries = read_query_file(queriesf)\n\n # load pca\n trainedpca = None\n if pcafile != \"\":\n f = open(pcafile, 'r')\n trainedpca = cPickle.load(f)\n f.close()\n assert pcadim > 0\n logger.info('trained pca loaded')\n\n # load lda\n if lda != None:\n lda = utils.load_pickle(lda)\n\n # to keep stats\n results = []\n\n # iterate over queries\n logger.info(\"Starting the binary task...\")\n\n # Get the dictionary transform\n td = load_transform(args.dictfile)\n\n for triplet in queries:\n # get features\n filenames = map(lambda tid: utils.path_from_tid(maindir, tid), triplet)\n triplet_feats = map(lambda f: extract_feats(f, td=td, \n lda_file=lda, lda_n=lda_n), filenames)\n if None in triplet_feats:\n continue\n\n # Apply pca if 
needed\n if trainedpca:\n triplet_feats = map(lambda feat: \\\n trainedpca.apply_newdata(feat, ndims=pcadim),\n triplet_feats)\n assert triplet_feats[np.random.randint(3)].shape[0] == pcadim\n \n # Compute result\n res1 = triplet_feats[0] - triplet_feats[1]\n res1 = np.sum(res1 * res1)\n res2 = triplet_feats[0] - triplet_feats[2]\n res2 = np.sum(res2 * res2)\n if res1 < res2:\n results.append(1)\n else:\n results.append(0)\n\n # verbose\n if len(results) % 5 == 0:\n logger.info(' --- after %d queries, accuracy: %.1f %%' % \\\n (len(results), 100. * np.mean(results)))\n # done\n logger.info('After %d queries, accuracy: %.1f %%' % (len(results),\n 100. * np.mean(results)))\n logger.info('Done! Took %.2f seconds' % (time.time() - start_time))\n\nif __name__ == '__main__':\n main()\n" ]
[ [ "numpy.sum", "numpy.random.randint", "numpy.median", "numpy.mean" ] ]
bmorris3/mosfire_wasp6
[ "df802640eeb717a649c18caa1e940b684eeb99dc" ]
[ "analysis/moar/samples.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 09:33:04 2015\n\n@author: bmmorris\n\"\"\"\n\nimport numpy as np\nimport triangle\nfrom matplotlib import pyplot as plt\n\ndef splitchain(infile, outfile, tossfraction=0.9):\n '''\n Take the last `savefraction` of file `infile`, save it as the\n smaller file `outfile`.\n '''\n with open(outfile, 'w') as out:\n with open(infile, 'r') as f:\n alllines = f.readlines()\n lastXpercent = int(tossfraction*len(alllines))\n shortlines = alllines[lastXpercent:]\n out.write(''.join(shortlines))\n\ndef loadchains(directory, file='chains.dat', burnin=0.0):\n '''\n Load chains in `directory` saved to text file `file`, eliminate\n burn in fraction `burnin`\n '''\n\n chains = np.loadtxt(directory+file)\n burnin = int(burnin*chains.shape[0])\n lnp = chains[burnin:, 1]\n samples = chains[burnin:, 2:]\n return lnp, samples\n \nclass emceesamples(object):\n def __init__(self, samples, labels, dtypes, Nbins, Nlightcurves):\n '''\n Input the samples, output from loadchains(), labels for each parameter\n and data types for each parameter according to the following format: \n \n 'o' = orbital parameter\n 'l' = (L) limb darkening\n 't' = transit parameters particular to each spectral bin\n 'w' = white noise hyperparameters\n 'r' = red noise hyperparameters\n 'a' = airmass\n 'R' = radius\n 'F' = out of transit flux\n \n '''\n \n self.samples = samples\n self.labels = labels\n self.dtypes = dtypes\n self.Nbins = Nbins\n self.Nlightcurves = Nlightcurves\n self.white = None\n self.red = None\n \n self.getld()\n self.getRpRs()\n self.getF0()\n self.getorb()\n self.getam()\n if 'w' in self.dtypes:\n self.getwhite()\n if 'r' in self.dtypes:\n self.getred()\n \n def getwhite(self):\n whiteinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'w']\n self.white = self.samples[:,whiteinds]\n self.whitelabels = len(whiteinds)*['w']\n\n def getred(self):\n redinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'r']\n self.red = self.samples[:,redinds]\n\n def getld(self):\n ldinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'l']\n self.ld = self.samples[:,ldinds]\n self.ldlabels = [label for i, label in enumerate(self.labels) \n if i in ldinds] \n\n def getorb(self):\n orbinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'o']\n self.orb = self.samples[:,orbinds]\n self.orblabels = [label for i, label in enumerate(self.labels) \n if i in orbinds] \n\n def getRpRs(self):\n RpRsinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'R']\n self.RpRs = self.samples[:,RpRsinds]\n self.RpRslabels = [label for i, label in enumerate(self.labels) \n if i in RpRsinds] \n\n def getF0(self):\n F0inds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'F']\n self.F0 = self.samples[:,F0inds]\n self.F0labels = [label for i, label in enumerate(self.labels) \n if i in F0inds] \n\n def getam(self):\n aminds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'a']\n self.am = self.samples[:,aminds]\n self.amlabels = [label for i, label in enumerate(self.labels) \n if i in aminds] \n\n def triangles(self, directory=None, wavelengths=None, show=False):\n '''\n Create triangle plots. 
If directory is not None, save plots in that \n directory.\n '''\n \n if wavelengths is None:\n wavelengths = np.arange(self.Nlightcurves)\n \n # Orbital parameters \n Norbparams = len(self.orblabels)\n trifig1, ax = plt.subplots(Norbparams, Norbparams, figsize=(10, 10))\n kwargs = dict(fig=trifig1, plot_datapoints=False, \n labels=self.orblabels)\n fig1 = triangle.corner(self.orb, **kwargs) \n trifig1.suptitle('Orbital Parameters', size=20)\n if directory is not None:\n trifig1.savefig(directory+'triangle_orbit.png',bbox_inches='tight')\n if not show:\n plt.clf()\n \n # Plot Limb darkening parameters\n for i in range(0, len(self.ldlabels), 2):\n trifigLD, ax = plt.subplots(2, 2, figsize=(6, 6))\n kwargs = dict(fig=trifigLD, plot_datapoints=False, \n labels=self.ldlabels[i:i+2])\n fig2 = triangle.corner(self.ld[:,i:i+2], \n labelspace=False, **kwargs) \n trifigLD.suptitle('LD Parameters', size=20)\n if directory is not None:\n trifigLD.savefig(directory+'triangle_ld{0}.png'.format(i/2),\n bbox_inches='tight')\n if not show:\n plt.clf()\n \n # Plot RpRs, F0, white noise\n for i in range(len(self.RpRslabels)):\n if i < self.Nbins:\n trifig, ax = plt.subplots(4, 4, figsize=(6, 6))\n kwargs = dict(fig=trifig, plot_datapoints=False, \n labels=[self.RpRslabels[i], self.F0labels[i],\n self.whitelabels[i], self.amlabels[i]])\n testsamples = np.vstack([self.RpRs[:,i],\n self.F0[:,i],\n self.white[:,i],\n self.am[:,i]]).T\n else:\n trifig, ax = plt.subplots(3, 3, figsize=(6, 6))\n kwargs = dict(fig=trifig, plot_datapoints=False, \n labels=[self.RpRslabels[i], self.F0labels[i],\n self.whitelabels[i]])\n testsamples = np.vstack([self.RpRs[:,i],\n self.F0[:,i],\n self.white[:,i]]).T\n\n fig2 = triangle.corner(testsamples, labelspace=True, **kwargs) \n trifig.suptitle('{0:.3f}$\\mu m$'.format(wavelengths[i]), size=20)\n if directory is not None:\n trifig.savefig(directory+'triangle_RpRs{0}.png'.format(i),\n bbox_inches='tight')\n if not show:\n plt.clf() \n \n if show:\n plt.show()\n else:\n plt.clf()\n \n \n \n \n \n " ]
[ [ "numpy.vstack", "matplotlib.pyplot.clf", "matplotlib.pyplot.subplots", "numpy.arange", "matplotlib.pyplot.show", "numpy.loadtxt" ] ]
JasonQSY/Associative3D
[ "c50818b593ec48c38ed7ee3e109c23531089da32", "c50818b593ec48c38ed7ee3e109c23531089da32" ]
[ "object_branch/benchmark/suncg/evaluate_detection.py", "object_branch/benchmark/nyu/box3d.py" ]
[ "# ---------------------------------------------------------\n# Copyright (c) 2015, Saurabh Gupta\n#\n# Licensed under The MIT License [see LICENSE for details]\n# ---------------------------------------------------------\nfrom ...utils import bbox_utils\nimport numpy as np\n\ndef inst_bench_image(dt, gt, bOpts, overlap = None):\n\n nDt = len(dt['sc'])\n nGt = len(gt['diff'])\n numInst = np.sum(gt['diff'] == False)\n\n if overlap is None:\n overlap = bbox_utils.bbox_overlaps(dt['boxInfo'].astype(np.float), gt['boxInfo'].astype(np.float))\n # assert(issorted(-dt.sc), 'Scores are not sorted.\\n');\n sc = dt['sc'];\n\n det = np.zeros((nGt,1)).astype(np.bool)\n tp = np.zeros((nDt,1)).astype(np.bool)\n fp = np.zeros((nDt,1)).astype(np.bool)\n dupDet = np.zeros((nDt,1)).astype(np.bool)\n instId = np.zeros((nDt,1)).astype(np.int32)\n ov = np.zeros((nDt,1)).astype(np.float32)\n\n # Walk through the detections in decreasing score\n # and assign tp, fp, fn, tn labels\n for i in range(nDt):\n # assign detection to ground truth object if any\n if nGt > 0:\n maxOverlap = overlap[i,:].max(); maxInd = overlap[i,:].argmax();\n instId[i] = maxInd; ov[i] = maxOverlap;\n else:\n maxOverlap = 0; instId[i] = -1; maxInd = -1;\n # assign detection as true positive/don't care/false positive\n if maxOverlap >= bOpts['minoverlap']:\n if gt['diff'][maxInd] == False:\n if det[maxInd] == False:\n # true positive\n tp[i] = True;\n det[maxInd] = True;\n else:\n # false positive (multiple detection)\n fp[i] = True;\n dupDet[i] = True;\n else:\n # false positive\n fp[i] = True;\n return tp, fp, sc, numInst, dupDet, instId, ov\n\n\ndef inst_bench(dt, gt, bOpts, tp=None, fp=None, score=None, numInst=None):\n \"\"\"\n ap, rec, prec, npos, details = inst_bench(dt, gt, bOpts, tp = None, fp = None, sc = None, numInst = None)\n dt - a list with a dict for each image and with following fields\n .boxInfo - info that will be used to cpmpute the overlap with ground truths, a list\n .sc - score\n gt\n .boxInfo - info used to compute the overlap, a list\n .diff - a logical array of size nGtx1, saying if the instance is hard or not\n bOpt\n .minoverlap - the minimum overlap to call it a true positive\n [tp], [fp], [sc], [numInst]\n Optional arguments, in case the inst_bench_image is being called outside of this function\n \"\"\"\n details = None\n if tp is None:\n # We do not have the tp, fp, sc, and numInst, so compute them from the structures gt, and out\n tp = []; fp = []; numInst = []; score = []; dupDet = []; instId = []; ov = [];\n for i in range(len(gt)):\n # Sort dt by the score\n sc = dt[i]['sc']\n bb = dt[i]['boxInfo']\n ind = np.argsort(sc, axis = 0);\n ind = ind[::-1]\n if len(ind) > 0:\n sc = np.vstack((sc[i,:] for i in ind))\n bb = np.vstack((bb[i,:] for i in ind))\n else:\n sc = np.zeros((0,1)).astype(np.float)\n bb = np.zeros((0,4)).astype(np.float)\n\n dtI = dict({'boxInfo': bb, 'sc': sc})\n tp_i, fp_i, sc_i, numInst_i, dupDet_i, instId_i, ov_i = inst_bench_image(dtI, gt[i], bOpts)\n tp.append(tp_i); fp.append(fp_i); score.append(sc_i); numInst.append(numInst_i);\n dupDet.append(dupDet_i); instId.append(instId_i); ov.append(ov_i);\n details = {'tp': list(tp), 'fp': list(fp), 'score': list(score), 'dupDet': list(dupDet),\n 'numInst': list(numInst), 'instId': list(instId), 'ov': list(ov)}\n\n tp = np.vstack(tp[:])\n fp = np.vstack(fp[:])\n sc = np.vstack(score[:])\n\n cat_all = np.hstack((tp,fp,sc))\n ind = np.argsort(cat_all[:,2])\n cat_all = cat_all[ind[::-1],:]\n tp = np.cumsum(cat_all[:,0], axis = 0);\n fp = 
np.cumsum(cat_all[:,1], axis = 0);\n thresh = cat_all[:,2];\n npos = np.sum(numInst, axis = 0);\n\n # Compute precision/recall\n rec = tp / npos;\n prec = np.divide(tp, (fp+tp));\n ap = VOCap(rec, prec);\n return ap, rec, prec, npos, details\n\ndef VOCap(rec, prec):\n rec = rec.reshape(rec.size,1); prec = prec.reshape(prec.size,1)\n z = np.zeros((1,1)); o = np.ones((1,1));\n mrec = np.vstack((z, rec, o))\n mpre = np.vstack((z, prec, z))\n for i in range(len(mpre)-2, -1, -1):\n mpre[i] = max(mpre[i], mpre[i+1])\n\n I = np.where(mrec[1:] != mrec[0:-1])[0]+1;\n ap = 0;\n for i in I:\n ap = ap + (mrec[i] - mrec[i-1])*mpre[i];\n return ap\n", "\n\"\"\"Script for dwr prediction benchmarking.\n\"\"\"\n# Sample usage:\n# (shape_ft) : python -m factored3d.benchmark.suncg.dwr --num_train_epoch=1 --name=dwr_shape_ft --classify_rot --pred_voxels=True --use_context --save_visuals --visuals_freq=50 --eval_set=val --suncg_dl_debug_mode --max_eval_iter=20\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom absl import app\nfrom absl import flags\nimport os\nimport os.path as osp\nimport numpy as np\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport time\nimport scipy.misc\nimport pdb\nimport copy\nimport json\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport time\nimport random\nfrom ...data import nyu as nyu_data\nfrom ..suncg import evaluate_detection\nfrom ...utils import bbox_utils\nfrom ...utils import suncg_parse\nfrom ...utils import nyu_parse\nfrom ...nnutils import test_utils\nfrom ...nnutils import net_blocks\nfrom ...nnutils import loss_utils\nfrom ...nnutils import oc_net\nfrom ...nnutils import disp_net\nfrom ...utils import metrics\nfrom ...utils import visutil\nfrom ...renderer import utils as render_utils\nfrom ...utils import quatUtils\nimport cv2\nfrom ...utils import transformations\nfrom collections import Counter\nfrom six.moves import cPickle as pickle\nimport collections\n\nq2e = quatUtils.convert_quat_to_euler\n\ncurr_path = osp.dirname(osp.abspath(__file__))\ncache_path = osp.join(curr_path, '..', '..', 'cachedir')\nflags.DEFINE_string('rendering_dir', osp.join(cache_path, 'rendering'),\n 'Directory where intermittent renderings are saved')\n\nflags.DEFINE_integer('voxel_size', 32, 'Spatial dimension of shape voxels')\nflags.DEFINE_integer('n_voxel_layers', 5, 'Number of layers ')\nflags.DEFINE_integer('voxel_nc_max', 128, 'Max 3D channels')\nflags.DEFINE_integer('voxel_nc_l1', 8, 'Initial shape encder/decoder layer dimension')\nflags.DEFINE_float('voxel_eval_thresh', 0.25, 'Voxel evaluation threshold')\nflags.DEFINE_string('id', 'default', 'Plot string')\n\nflags.DEFINE_string('shape_pretrain_name', 'object_autoenc_32', 'Experiment name for pretrained shape encoder-decoder')\nflags.DEFINE_integer('shape_pretrain_epoch', 800, 'Experiment name for shape decoder')\n\nflags.DEFINE_integer('max_rois', 100, 'If we have more objects than this per image, we will subsample.')\nflags.DEFINE_integer('max_total_rois', 100, 'If we have more objects than this per batch, we will reject the batch.')\nflags.DEFINE_integer('num_visuals', 200, 'Number of renderings')\nflags.DEFINE_boolean('preload_stats', False, 'Reload the stats for the experiment')\nflags.DEFINE_string('layout_name', 'layout_pred', 'Experiment name for layout predictor')\nflags.DEFINE_integer('layout_train_epoch', 8, 'Experiment name for layout predictor')\nflags.DEFINE_boolean('use_gt_voxels', True, 'Use 
gt_voxels_for_prediction')\nflags.DEFINE_string('ovis_ids_filename', None, 'Ids to visualize output file')\nflags.DEFINE_string('ivis_ids_filename', None, 'Ids to visualize output file')\nflags.DEFINE_string('results_name', None, 'results_name')\nflags.DEFINE_boolean('gt_updates', False, 'Use gt_relative updates')\nflags.DEFINE_boolean('do_updates', True, 'Do opt updates')\nflags.DEFINE_string('index_file', None, 'file containing house names and view ids')\nflags.DEFINE_string('log_csv', None, 'file containing relative acc data')\nflags.DEFINE_boolean('draw_vis', False, 'Do not evaluate only draw visualization')\nflags.DEFINE_boolean('load_predictions_from_disk', False, 'Load pkl files')\nflags.DEFINE_boolean('save_predictions_to_disk', True, 'Save pkl files')\nflags.DEFINE_float('lambda_weight', 1.0, 'lambda for rotation')\nflags.DEFINE_float('split_size', 1.0, 'Split size of the train set')\nflags.DEFINE_boolean('only_pairs', True, 'Train with only more than 2 examples per ')\nflags.DEFINE_boolean('dwr_model', False, 'Load a dwr mode ')\n\nFLAGS = flags.FLAGS\n\nEP_box_iou_thresh = [0.5, 0.5, 0.5, 0.5, 0., 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, ]\nEP_rot_delta_thresh = [30., 30., 400., 30., 30., 30., 400., 30., 400., 400., 400., 30, ]\nEP_trans_delta_thresh = [1., 1., 1., 1000., 1, 1., 1000., 1000., 1.0, 1000., 1000., 1000., ]\nEP_shape_iou_thresh = [0.25, 0, 0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0.25, 0, 0.25, ]\nEP_scale_delta_thresh = [0.5, 0.5, 0.5, 0.5, 0.5, 100., 100., 100, 100, 100, 0.5, 100, ]\nEP_ap_str = ['all', '-shape', '-rot', '-trans', '-box2d', '-scale', 'box2d',\n 'box2d+rot', 'box2d+trans', 'box2d+shape', 'box2d+scale', 'box2d+rot+shape', ]\n\n\ndef my_print(tensor):\n try:\n print(np.round(tensor.numpy(),2))\n except:\n print(np.round(tensor, 2))\n return\n\nclass DWRTester(test_utils.Tester):\n\n def define_model(self):\n '''\n Define the pytorch net 'model' whose weights will be updated during training.\n '''\n self.eval_shape_iou = False\n opts = self.opts\n self.object_class2index = {'bed' : 1, 'sofa' :2, 'table' :3, \n 'chair':4 , 'desk':5, 'television':6,\n }\n\n self.index2object_class = {1: 'bed', 2 :'sofa', 3 : 'table', \n 4 :'chair', 5 : 'desk', 6 : 'television',\n }\n\n self.voxel_encoder, nc_enc_voxel = net_blocks.encoder3d(\n opts.n_voxel_layers, nc_max=opts.voxel_nc_max, nc_l1=opts.voxel_nc_l1, nz_shape=opts.nz_shape)\n\n self.voxel_decoder = net_blocks.decoder3d(\n opts.n_voxel_layers, opts.nz_shape, nc_enc_voxel, nc_min=opts.voxel_nc_l1)\n\n self.model = oc_net.OCNet(\n (opts.img_height, opts.img_width), opts=self.opts,\n roi_size=opts.roi_size,\n use_context=opts.use_context, nz_feat=opts.nz_feat,\n pred_voxels=False, nz_shape=opts.nz_shape, pred_labels=True, pred_graph=opts.pred_graph,\n classify_rot=opts.classify_rot, nz_rot=opts.nz_rot, n_g_layers=opts.n_g_layers,)\n #\n\n if opts.pred_voxels and opts.dwr_model:\n self.model.code_predictor.shape_predictor.add_voxel_decoder(\n copy.deepcopy(self.voxel_decoder))\n\n if opts.dwr_model:\n # self.opts.num_train_epoch=1\n self.model.add_label_predictor()\n self.eval_shape_iou = True\n opts.use_gt_voxels = False\n\n self.load_network(self.model, 'pred', self.opts.num_train_epoch)\n \n if not opts.dwr_model:\n self.model.add_label_predictor()\n \n self.model.eval()\n self.model = self.model.cuda()\n # self.model = self.model.cuda(device=self.opts.gpu_id)\n\n if opts.pred_voxels and (not opts.dwr_model):\n self.voxel_decoder = copy.deepcopy(self.model.code_predictor.shape_predictor.decoder)\n\n self.layout_model = 
disp_net.dispnet()\n network_dir = osp.join(opts.cache_dir, 'snapshots', opts.layout_name)\n self.load_network(\n self.layout_model, 'pred', opts.layout_train_epoch, network_dir=network_dir)\n # self.layout_model.eval()\n # self.layout_model = self.layout_model.cuda(device=self.opts.gpu_id)\n\n return\n\n def init_dataset(self):\n opts = self.opts\n self.resnet_transform = torchvision.transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n split_dir = osp.join(opts.nyu_dir, 'splits')\n self.split = nyu_parse.get_split(split_dir, image_names=os.listdir(osp.join(opts.nyu_dir, 'images')))\n # houses_splits = self.split[opts.eval_set]\n if opts.eval_set == 'train':\n rng = np.random.RandomState(10) \n rng.shuffle(self.split[opts.eval_set])\n len_splitset = int(len(self.split[opts.eval_set])*opts.split_size)\n self.split[opts.eval_set] = self.split[opts.eval_set][0:len_splitset]\n # print(self.split[opts.eval_set])\n\n self.dataloader = nyu_data.nyu_data_loader_benchmark(self.split[opts.eval_set], opts)\n\n if opts.voxel_size < 64:\n self.downsample_voxels = True\n self.downsampler = render_utils.Downsample(\n 64 // opts.voxel_size, use_max=True, batch_mode=True\n ).cuda()\n else:\n self.downsampler = None\n\n if opts.classify_rot:\n self.quat_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids.mat'))['medoids']).type(torch.FloatTensor)\n\n if not opts.pred_voxels:\n network_dir = osp.join(opts.cache_dir, 'snapshots', opts.shape_pretrain_name)\n self.load_network(\n self.voxel_decoder,\n 'decoder', opts.shape_pretrain_epoch, network_dir=network_dir)\n self.voxel_decoder.eval()\n self.voxel_decoder = self.voxel_decoder.cuda()\n\n self.spatial_image = Variable(nyu_data.define_spatial_image(opts.img_height_fine, opts.img_width_fine, 1.0/16).unsqueeze(0).cuda()) ## (1, 2, 30, 40)\n \n if opts.classify_rot:\n self.quat_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids.mat'))['medoids']).type(torch.FloatTensor)\n if opts.nz_rot == 48:\n self.quat_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids_48.mat'))['medoids']).type(torch.FloatTensor)\n\n nz_rel_rot = opts.nz_rel_rot\n self.quat_medoids_relative = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids_relative_{}_new.mat'.format(nz_rel_rot)))['medoids']).type(torch.FloatTensor)\n assert len(self.quat_medoids_relative) == opts.nz_rel_rot, ' Relative rotation architecture does not match'\n # self.quat_medoids_relative = torch.from_numpy(\n # scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids_relative.mat'))['medoids']).type(torch.FloatTensor)\n self.quat_medoids_var = None\n\n # define the nearest bin metric?\n # n_absoulte_bins = len(self.quat_medoids)\n # quatsA = self.quat_medoids.unsqueeze(1).expand(torch.Size(\n # [n_absoulte_bins, n_absoulte_bins, 4])).contiguous().view(-1, 4)\n # quatsB = self.quat_medoids.unsqueeze(0).expand(torch.Size(\n # [n_absoulte_bins, n_absoulte_bins, 4])).contiguous().view(-1, 4)\n # quatsA_conjugate = quatUtils.quat_conjugate(quatsA)\n # relative_quats = quatUtils.rotate_quat(quatsB, quatsA_conjugate)\n # self.relative_quats_binids = suncg_parse.quats_to_bininds(relative_quats, self.quat_medoids_relative)\n # self.relative_quats_binids = self.relative_quats_binids.view(n_absoulte_bins, n_absoulte_bins)\n if opts.classify_dir:\n self.direction_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 
'direction_medoids_relative_{}_new.mat'.format(opts.nz_rel_dir)))['medoids']).type(torch.FloatTensor)\n self.direction_medoids = torch.nn.functional.normalize(self.direction_medoids)\n\n self.data_vis = []\n self.stored_quat_relative_gt_classes = []\n self.stored_quat_relative_pred_classes = []\n self.rotation_bins = []\n self.translation = []\n self.pred_translation = []\n self.pred_rotation = []\n self.pred_relative_directions = []\n self.relative_directions =[]\n return\n\n def decode_shape(self, pred_shape):\n opts = self.opts\n if opts.use_gt_voxels:\n # assert pred_shape.size() == self.codes_gt[0].size(), 'predict size from gt incorrect'\n return self.codes_gt['shape'].clone()\n\n pred_shape = torch.nn.functional.sigmoid(self.voxel_decoder.forward(pred_shape))\n return pred_shape\n\n def decode_rotation(self, pred_rot):\n opts = self.opts\n if opts.classify_rot:\n _, bin_inds = torch.max(pred_rot.data.cpu(), 1)\n pred_rot = Variable(suncg_parse.bininds_to_quats(\n bin_inds, self.quat_medoids), requires_grad=False)\n return pred_rot\n\n def decode_rotation_topk(self, pred_rot):\n opts = self.opts\n if opts.classify_rot:\n _, bin_inds = torch.topk(pred_rot.data.cpu(), k=2, dim=1)\n bin_inds = bin_inds.view(-1, 1)\n pred_rot = Variable(suncg_parse.bininds_to_quats(\n bin_inds, self.quat_medoids), requires_grad=False)\n pred_rot = pred_rot.view(-1, 2, 4)\n return pred_rot\n\n def get_class_indices(self, pred_rot):\n opts = self.opts\n _, bin_inds = torch.max(pred_rot.data.cpu(), 1)\n return bin_inds\n\n def decode_rotation_relative(self, pred_rot):\n opts = self.opts\n if opts.classify_rot:\n _, bin_inds = torch.max(pred_rot.data.cpu(), 1)\n pred_rot = Variable(suncg_parse.bininds_to_quats(\n bin_inds, self.quat_medoids_relative), requires_grad=False)\n return pred_rot\n\n def decode_class(self, pred_class):\n opts = self.opts\n # pdb.set_trace()\n _, bin_inds = torch.max(pred_class.data.cpu(), 1)\n return bin_inds\n\n def count_number_pairs(self, rois):\n counts = Counter(rois[:,0].numpy().tolist())\n pairs = sum([v*v for (k,v) in counts.items() if v > 1])\n return pairs\n\n def set_input(self, batch):\n opts = self.opts\n if batch is None or not batch:\n self.invalid_batch = True\n self.invalid_rois = None\n return\n\n if batch['empty']:\n self.invalid_rois = None\n self.invalid_batch = True\n return\n\n bboxes_gt = suncg_parse.bboxes_to_rois(batch['bboxes'])\n # bboxes_proposals = suncg_parse.bboxes_to_rois(batch['bboxes_test_proposals'])\n bboxes_proposals = bboxes_gt\n rois = bboxes_proposals\n if rois.numel() <= 0 or bboxes_gt.numel() <= 0: # some proposals and gt objects should be there\n self.invalid_batch = True\n self.invalid_rois = None\n return\n else:\n if bboxes_gt.numel() == 5 and self.opts.only_pairs: \n self.invalid_rois = None\n self.invalid_batch = True\n return\n # if bboxes_gt.numel() > 8 * 5:\n # self.invalid_batch = True\n # return\n pairs = self.count_number_pairs(rois)\n # if pairs <= 1:\n # self.invalid_batch = True\n # self.invalid_rois = None\n # return\n self.invalid_batch = False\n\n self.image_names = batch['image_name']\n # Inputs for prediction\n if self.opts.load_predictions_from_disk:\n return\n\n\n\n input_imgs_fine = batch['img_fine'].type(torch.FloatTensor)\n input_imgs = batch['img'].type(torch.FloatTensor)\n\n self.input_imgs_layout = Variable(\n input_imgs.cuda(), requires_grad=False)\n\n for b in range(input_imgs_fine.size(0)):\n input_imgs_fine[b] = self.resnet_transform(input_imgs_fine[b])\n input_imgs[b] = 
self.resnet_transform(input_imgs[b])\n\n self.input_imgs = Variable(\n input_imgs.cuda(), requires_grad=False)\n\n self.input_imgs_fine = Variable(\n input_imgs_fine.cuda(), requires_grad=False)\n\n self.rois = Variable(\n rois.type(torch.FloatTensor).cuda(), requires_grad=False)\n\n\n code_tensors = suncg_parse.collate_codes(batch['codes'])\n code_tensors_quats = code_tensors['quat']\n self.amodal_bboxes = code_tensors['amodal_bbox']\n object_classes = code_tensors['class'].type(torch.LongTensor)\n self.class_gt = self.object_classes = Variable(object_classes.cuda(), requires_grad=False)\n \n self.object_locations = suncg_parse.batchify(code_tensors['trans'], self.rois[:, 0].data.cpu())\n code_tensors['shape'] = code_tensors['shape'].unsqueeze(1) # unsqueeze voxels\n \n vox2cams = code_tensors['transform_vox2cam']\n self.vox2cams = vox2cams = suncg_parse.batchify(vox2cams, self.rois[:,0].data.cpu())\n \n cam2voxs = code_tensors['transform_cam2vox']\n cam2voxs = suncg_parse.batchify(cam2voxs, self.rois[:,0].data.cpu())\n \n relative_direction_rotation = []\n relative_dir_mask = []\n for bx in range(len(cam2voxs)):\n nx = len(cam2voxs[bx])\n mask = torch.ones([nx*nx])\n for ix in range(nx):\n for jx in range(nx):\n if ix == jx:\n mask[ix*nx + jx] = 0\n directions = []\n for cam2vox in cam2voxs[bx][ix]:\n dt_trj_cam_frame = self.object_locations[bx][jx]\n direction = self.homogenize_coordinates(dt_trj_cam_frame.unsqueeze(0))\n direction = torch.matmul(cam2vox, direction.t()).t()[:,0:3]\n direction = direction/(torch.norm(direction,p=2, dim=1, keepdim=True) + 1E-5)\n directions.append(direction)\n directions = torch.cat(directions)\n relative_direction_rotation.append(directions)\n relative_dir_mask.append(mask)\n self.relative_dir_mask = Variable(torch.cat(relative_dir_mask,dim=0).byte()).cuda()\n\n assert self.opts.batch_size == 1, 'batch size > 1 not supported'\n\n if opts.classify_dir and not opts.gmm_dir:\n self.relative_direction_rotation = relative_direction_rotation\n relative_direction_rotation_binned = [suncg_parse.directions_to_bininds(t, self.direction_medoids) for t in relative_direction_rotation]\n # relative_direction_rotation_directions = [suncg_parse.bininds_to_directions(t, self.direction_medoids) for t in relative_direction_rotation_binned]\n # ## compute some average error?\n # error = torch.cat([(1 - t1*t2.sum(1)).mean() for (t1, t2) in zip(relative_direction_rotation, relative_direction_rotation_directions)]).mean()\n # # pdb.set_trace()\n self.relative_direction_rotation_binned = [Variable(t).cuda() for t in relative_direction_rotation_binned]\n\n \n self.object_locations = [Variable(t_.cuda(), requires_grad=False) for t_ in\n self.object_locations]\n\n self.object_scales = suncg_parse.batchify(code_tensors['scale'] + 1E-10, self.rois[:, 0].data.cpu())\n self.object_scales = [Variable(t_.cuda(), requires_grad=False) for t_ in\n self.object_scales]\n\n self.relative_trans_gt = []\n self.relative_scale_gt = []\n for bx in range(len(self.object_locations)):\n relative_locations = self.object_locations[bx].unsqueeze(0) - self.object_locations[bx].unsqueeze(1)\n relative_locations = relative_locations.view(-1, 3)\n self.relative_trans_gt.append(relative_locations)\n # this is in log scale.\n relative_scales = self.object_scales[bx].unsqueeze(0).log() - self.object_scales[bx].unsqueeze(1).log()\n relative_scales = relative_scales.view(-1, 3)\n self.relative_scale_gt.append(relative_scales)\n\n self.relative_scale_gt = torch.cat(self.relative_scale_gt, dim=0) # this is in log 
scale\n self.relative_trans_gt = torch.cat(self.relative_trans_gt, dim=0)\n self.relative_gt = {'relative_trans' : self.relative_trans_gt,\n 'relative_scale' : self.relative_scale_gt,\n 'relative_dir' : self.relative_direction_rotation,\n 'relative_mask' : self.relative_dir_mask,\n }\n \n # self.layout_gt=Variable(\n # batch['layout'].cuda(), requires_grad=False)\n\n self.codes_gt_quats = [\n Variable(t.cuda(), requires_grad=False) for t in code_tensors_quats]\n codes_gt_keys = ['shape', 'scale', 'trans']\n self.codes_gt ={key : Variable(code_tensors[key].cuda(), requires_grad=False) \n for key in codes_gt_keys}\n self.codes_gt['quat'] = self.codes_gt_quats\n\n self.rois_gt=Variable(\n bboxes_gt.type(torch.FloatTensor).cuda(), requires_grad=False)\n if self.downsample_voxels:\n self.codes_gt['shape']=self.downsampler.forward(self.codes_gt['shape'])\n return\n\n def convert_multiple_bins_to_probabilites(self, bins_ids, num_medoids, no_noise=1.0):\n \n bins = [torch.LongTensor([random.choice(c.data)]) for c in bins_ids]\n noise_values = torch.bernoulli(torch.FloatTensor(len(bins)).zero_() + no_noise)\n bins = [c if n > 0.5 else torch.LongTensor([np.random.randint(num_medoids)]) for c, n in zip(bins, noise_values)]\n bins = torch.cat(bins)\n probs = torch.FloatTensor(len(bins), num_medoids).zero_()\n probs.scatter_(1, bins.unsqueeze(1), 1-0.001*num_medoids)\n probs = probs + 0.001\n return probs\n\n '''\n args\n relative_directions : list N^2, torch.Tensor K x 3\n vox2cams : list N , K x 4,4\n img_size : (H, W)\n returns:\n relative_directions in image plane N^2 K x 2 x 2\n '''\n def convert_relative_vectors_to_image_plane(self, relative_directions, vox2cams, img_size):\n def convert_vector_to_image_plane(vector, vox2cam, cam_intrinsic, img_size):\n vector_cam_frame = suncg_parse.transform_coordinates(vox2cam, vector.reshape(1, -1))\n img_frame = suncg_parse.transform_to_image_coordinates(vector_cam_frame, cam_intrinsic)\n img_frame = np.clip(img_frame, a_min=np.array([[0,0]]), a_max=np.array([[img_size[1], img_size[0]]]))\n return img_frame\n\n cam_intrinsic = suncg_parse.cam_intrinsic()\n img_vectors = []\n n_objects = len(vox2cams)\n for ix, rel_dir in enumerate(relative_directions):\n rel_dir = rel_dir[0]\n vox2cam = vox2cams[ix//n_objects][0]\n src_vector = convert_vector_to_image_plane(np.array([0,0,0]), vox2cam.numpy(), cam_intrinsic, img_size)\n trj_vector = convert_vector_to_image_plane(rel_dir.numpy(), vox2cam.numpy(), cam_intrinsic, img_size)\n img_vectors.append(np.concatenate([src_vector, trj_vector], axis=0))\n\n index = [ix*n_objects + ix for ix in range(n_objects)]\n img_vectors = [(img_vectors[ix], ix//n_objects, ix % n_objects) for ix in range(n_objects*n_objects) if ix not in index]\n return img_vectors\n\n # def save_current_stats(self, bench):\n # imgs_dir=osp.join(self.opts.results_vis_dir, 'vis_iter_{}'.format(self.vis_iter))\n # if not os.path.exists(imgs_dir):\n # os.makedirs(imgs_dir)\n # json_file=os.path.join(imgs_dir, 'bench_iter_{}.json'.format(0))\n # # print(json_file)\n # with open(json_file, 'w') as f:\n # json.dump({'bench': bench}, f)\n\n def save_layout_mesh(self, mesh_dir, layout, prefix='layout'):\n opts=self.opts\n layout_vis=layout.data[0].cpu().numpy().transpose((1, 2, 0))\n mesh_file=osp.join(mesh_dir, prefix + '.obj')\n vs, fs=render_utils.dispmap_to_mesh(\n layout_vis,\n suncg_parse.cam_intrinsic(),\n scale_x=self.opts.layout_width / 640,\n scale_y=self.opts.layout_height / 480\n )\n fout=open(mesh_file, 'w')\n mesh_file=osp.join(mesh_dir, prefix + 
'.obj')\n fout=open(mesh_file, 'w')\n render_utils.append_obj(fout, vs, fs)\n fout.close()\n\n def save_codes_mesh(self, mesh_dir, code_vars, prefix='codes'):\n opts=self.opts\n n_rois=code_vars['shape'].size()[0]\n code_list=suncg_parse.uncollate_codes(code_vars, self.input_imgs.data.size(0), torch.Tensor(n_rois).fill_(0))\n\n if not os.path.exists(mesh_dir):\n os.makedirs(mesh_dir)\n mesh_file=osp.join(mesh_dir, prefix + '.obj')\n new_codes_list = suncg_parse.convert_codes_list_to_old_format(code_list[0])\n render_utils.save_parse(mesh_file, new_codes_list, save_objectwise=False, thresh=0.1)\n\n def render_visuals(self, mesh_dir, obj_name=None):\n png_dir=osp.join(mesh_dir, 'rendering')\n if obj_name is not None:\n render_utils.render_mesh(osp.join(mesh_dir, obj_name + '.obj'), png_dir)\n im_view1=scipy.misc.imread(osp.join(png_dir, '{}_render_000.png'.format(obj_name)))\n # im_view2=scipy.misc.imread(osp.join(png_dir, '{}_render_003.png'.format(obj_name)))\n else:\n render_utils.render_directory(mesh_dir, png_dir)\n im_view1=scipy.misc.imread(osp.join(png_dir, 'render_000.png'))\n # im_view2=scipy.misc.imread(osp.join(png_dir, 'render_003.png'))\n\n # return im_view1, im_view2\n return im_view1\n\n\n\n def get_current_visuals(self):\n visuals={}\n opts=self.opts\n visuals['img']=visutil.tensor2im(visutil.undo_resnet_preprocess(\n self.input_imgs_fine.data))\n rois=self.rois.data\n visuals['img_roi']=render_utils.vis_detections(visuals['img'], rois[:, 1:])\n\n # img_rel_vectors_pred = self.convert_relative_vectors_to_image_plane([[x] for x in self.relative_direction_prediction_3d], \n # self.vox2cams[0], (self.opts.img_height_fine, self.opts.img_width_fine))\n # img_rel_vectors_gt = self.convert_relative_vectors_to_image_plane(self.relative_direction_rotation,\n # self.vox2cams[0], (self.opts.img_height_fine, self.opts.img_width_fine))\n # visuals['img_rel_dir_pred']=render_utils.vis_relative_dirs(visuals['img_roi'], img_rel_vectors_pred)\n # visuals['img_rel_dir_gt']=render_utils.vis_relative_dirs(visuals['img_roi'], img_rel_vectors_gt)\n\n \n # mesh_dir=osp.join(opts.rendering_dir, opts.name)\n # return visuals\n\n mesh_dir=osp.join(opts.rendering_dir)\n # vis_codes=[self.codes_pred_vis, self.codes_gt]\n vis_codes=[self.codes_pred_eval, self.codes_gt]\n # vis_codes=[self.codes_gt]\n # vis_layouts = [self.layout_pred, self.layout_gt]\n vis_names=['b_pred', 'c_gt']\n # vis_names=['c_gt']\n # vis_names=['b_pred']\n for vx, v_name in enumerate(vis_names):\n os.system('rm {}/*.obj'.format(mesh_dir))\n self.save_codes_mesh(mesh_dir, vis_codes[vx])\n # self.save_layout_mesh(mesh_dir, vis_layouts[vx])\n\n # visuals['{}_layout_cam_view'.format(v_name)], visuals['{}_layout_novel_view'.format(v_name)] = self.render_visuals(\n # mesh_dir, obj_name='layout')\n # visuals['{}_objects_cam_view'.format(v_name)], visuals['{}_objects_novel_view'.format(v_name)]=self.render_visuals(\n # mesh_dir, obj_name='codes')\n # visuals['{}_scene_cam_view'.format(v_name)], visuals['{}_scene_novel_view'.format(v_name)]=self.render_visuals(\n # mesh_dir)\n visuals['{}_objects_cam_view'.format(v_name)] =self.render_visuals(mesh_dir, obj_name='codes')\n # visuals['{}_scene_cam_view'.format(v_name)] =self.render_visuals(mesh_dir)\n return visuals\n\n\n def filter_pos(self, codes, pos_inds):\n pos_inds=torch.from_numpy(np.array(pos_inds)).squeeze()\n t = torch.LongTensor\n\n if type(codes) == dict:\n key = 'shape'\n if isinstance(codes[key], torch.autograd.Variable):\n if isinstance(codes[key].data, 
torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n elif isinstance(codes[key], torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n\n\n pos_inds=torch.autograd.Variable(\n pos_inds.type(t), requires_grad=False)\n filtered_codes= {k : torch.index_select(code, 0, pos_inds) for k, code in codes.items()}\n\n else:\n if isinstance(codes[0], torch.autograd.Variable):\n if isinstance(codes[0].data, torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n elif isinstance(codes[0], torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n\n pos_inds =torch.autograd.Variable(\n pos_inds.type(t), requires_grad=False)\n filtered_codes = [torch.index_select(code, 0, pos_inds) for code in codes]\n return filtered_codes\n\n\n def compute_entropy(self, log_probs):\n return np.sum(-1*np.exp(log_probs)*log_probs, axis=-1)\n\n def update_locations(self, trans_location, relative_locations):\n n_objects=trans_location.size(0)\n # lmbda = min(n_objects*n_objects, 5)\n # lmbda = max(n_objects, 5)\n lmbda=1.0\n relative_locations=relative_locations.numpy()\n trans_location=trans_location.numpy()\n A=np.zeros((n_objects * n_objects, n_objects))\n b=np.zeros((n_objects * n_objects, 3))\n index=0\n for i in range(n_objects):\n for j in range(n_objects):\n if i == j:\n continue\n # don't add the constraint if it is farther than a particular distance\n dist=np.linalg.norm(relative_locations[i * n_objects + j])\n if dist < 10:\n A[index][i]=-1\n A[index][j]=1\n b[index]=relative_locations[i * n_objects + j]\n index += 1\n for i in range(n_objects):\n A[index][i]=lmbda * 1\n b[index]=lmbda * trans_location[i]\n index += 1\n A=A[0:index]\n b=b[0:index]\n # pdb.set_trace()\n new_location=np.linalg.lstsq(A, b)\n # source_matrix = np.cat([np.zeros(n_objects-1, n_objects)\n return torch.from_numpy(new_location[0]), np.linalg.norm(new_location[0] - trans_location, axis=1).tolist()\n # return torch.from_numpy(trans_location)\n\n\n def save_predictions_to_pkl(self, dict_of_outputs):\n pkl_file_name = osp.join(self.opts.results_eval_dir, \"{}_{}.pkl\".format(self.house_names[0], self.view_ids[0]))\n \n def recursive_convert_to_numpy(elem):\n if isinstance(elem, collections.Mapping):\n return {key: recursive_convert_to_numpy(elem[key]) for key in elem}\n elif isinstance(elem, str):\n return elem\n elif isinstance(elem, collections.Sequence):\n return [recursive_convert_to_numpy(samples) for samples in elem]\n elif isinstance(elem, torch.FloatTensor):\n return elem.numpy()\n elif isinstance(elem, torch.cuda.FloatTensor):\n return elem.cpu().numpy()\n elif isinstance(elem, torch.LongTensor):\n return elem.numpy()\n elif isinstance(elem, torch.cuda.LongTensor):\n return elem.cpu().numpy()\n elif isinstance(elem, torch.autograd.Variable):\n return recursive_convert_to_numpy(elem.data)\n else:\n return elem\n\n new_dict = recursive_convert_to_numpy(dict_of_outputs)\n with open(pkl_file_name, 'wb') as f:\n pickle.dump(new_dict, f)\n\n def convert_pkl_to_predictions(self, ):\n pkl_file_name = osp.join(self.opts.results_eval_dir, \"{}_{}.pkl\".format(self.house_names[0], self.view_ids[0]))\n def recursive_convert_to_torch(elem):\n if isinstance(elem, collections.Mapping):\n return {key: recursive_convert_to_torch(elem[key]) for key in elem}\n elif isinstance(elem, str):\n return elem\n elif isinstance(elem, collections.Sequence):\n return [recursive_convert_to_torch(samples) for samples in elem]\n elif isinstance(elem, np.ndarray):\n if elem.dtype == np.int32:\n torch.from_numpy(elem).long()\n else:\n return 
torch.from_numpy(elem).float()\n else:\n return elem\n with open(pkl_file_name, 'rb') as f:\n predictions = pickle.load(f)\n predictions = recursive_convert_to_torch(predictions)\n predictions['gt_codes'] = [Variable(k) for k in predictions['gt_codes']]\n predictions['pred_codes'] = [Variable(k) for k in predictions['pred_codes']]\n predictions['object_class_gt'] = Variable(predictions['object_class_gt']).long()\n predictions['rois'] = Variable(predictions['rois'])\n predictions['amodal_bboxes'] = predictions['amodal_bboxes']\n predictions['codes_gt_quats'] = [Variable(t) for t in predictions['codes_gt_quats']]\n\n \n try:\n predictions['relative_trans'] = Variable(predictions['relative_trans'])\n predictions['relative_scale'] = Variable(predictions['relative_scale'])\n predictions['relative_dir'] = Variable(predictions['relative_dir'])\n predictions['relative_gt'] = [Variable(k) for k in predictions['relative_gt']]\n predictions['trans_dependent_rotation'] = [k for k in predictions['trans_dependent_rotation']]\n predictions['trans_dependent_rotation_binned'] = [Variable(k).long() for k in predictions['trans_dependent_rotation_binned']]\n predictions['relative_quat_tensors_angles_gt'] = predictions['relative_quat_tensors_angles_gt']\n \n except KeyError as e:\n assert self.opts.pred_relative == False, 'relative outputs required'\n predictions['relative_trans'] = predictions['relative_scale'] = predictions['relative_quat'] = None\n predictions['relative_dir'] = predictions['relative_gt'] = predictions['trans_dependent_rotation'] = None\n predictions['trans_dependent_rotation_binned'] = predictions['relative_quat_tensors_angles_gt'] = None\n\n try:\n predictions['codes_quat_var'] = Variable(predictions['codes_quat_var'])\n except KeyError as e:\n assert self.opts.var_gmm_rot == False, 'var gmm rots given'\n predictions['codes_quat_var'] = None\n\n return predictions\n\n\n def predict(self):\n # pdb.set_trace()\n # codes_pred_all, trj_pred_all, labels_pred = self.model.forward((self.input_imgs_fine, self.input_imgs, self.rois))\n if not self.opts.load_predictions_from_disk:\n feed_dict = {}\n feed_dict['imgs_inp_fine'] = self.input_imgs_fine\n feed_dict['imgs_inp_coarse'] = self.input_imgs\n feed_dict['rois_inp'] = self.rois\n feed_dict['location_inp'] = self.object_locations\n feed_dict['class_inp'] = self.object_classes\n feed_dict['spatial_image'] = self.spatial_image\n\n model_pred , _ =self.model.forward(feed_dict)\n\n # pdb.set_trace()\n # codes_pred_all, labels_pred = model_pred[0], model_pred[-1]\n codes_pred_all = model_pred['codes_pred']\n if self.opts.gmm_rot and self.opts.var_gmm_rot:\n self.codes_quat_var = codes_pred_all['quat'][1].data.cpu().numpy()\n codes_pred_all['quat'] = torch.nn.functional.log_softmax(codes_pred_all['quat'][0])\n else:\n codes_pred_all['quat'] = torch.nn.functional.log_softmax(codes_pred_all['quat'])\n \n stuff_to_save = {'gt_codes' : self.codes_gt,\n 'pred_codes' : codes_pred_all, \n 'object_class_gt' : self.object_classes,\n 'rois' : self.rois, \n 'index2object' : self.index2object_class,\n 'amodal_bboxes' : self.amodal_bboxes,\n 'codes_gt_quats' : self.codes_gt_quats,}\n if self.opts.gmm_rot and self.opts.var_gmm_rot:\n stuff_to_save['codes_quat_var'] = self.codes_quat_var\n\n\n if self.opts.pred_relative:\n self.relative_predictions = model_pred['codes_relative']\n self.relative_trans_predictions = self.relative_predictions['relative_trans']\n self.relative_scale_predictions = self.relative_predictions['relative_scale']\n 
self.relative_direction_prediction = self.relative_predictions['relative_dir']\n\n if self.opts.gmm_dir:\n self.relative_direction_prediction = self.relative_direction_prediction[0]\n\n self.relative_direction_prediction = torch.nn.functional.log_softmax(self.relative_direction_prediction)\n stuff_to_save['relative_trans' ] = self.relative_trans_predictions\n stuff_to_save['relative_scale' ] = self.relative_scale_predictions\n stuff_to_save['relative_dir' ] = self.relative_direction_prediction\n stuff_to_save['relative_gt' ] = self.relative_gt\n stuff_to_save['trans_dependent_rotation' ] = self.relative_direction_rotation\n stuff_to_save['trans_dependent_rotation_binned' ] = self.relative_direction_rotation_binned\n if self.opts.pred_class:\n self.class_pred=model_pred['class_pred']\n # self.class_pred=model_pred['class_pred']\n labels_pred=model_pred['labels_pred']\n \n if self.opts.save_predictions_to_disk:\n self.save_predictions_to_pkl(stuff_to_save)\n assert osp.exists(osp.join(self.opts.results_eval_dir, \"{}_{}.pkl\".format(self.house_names[0], self.view_ids[0]))), 'pkl file does not exist'\n\n else:\n # if self.house_names[0] == '4c5edfb056c1f38d58482a05562d8c1d':\n # pdb.set_trace()\n predictions = self.convert_pkl_to_predictions()\n self.codes_gt = tuple(predictions['gt_codes'])\n codes_pred_all = tuple(predictions['pred_codes'])\n\n self.relative_trans_predictions = predictions['relative_trans']\n self.relative_scale_predictions = predictions['relative_scale']\n self.relative_quat_predictions = predictions['relative_quat' ]\n self.relative_direction_prediction = predictions['relative_dir'] \n self.relative_predictions = [self.relative_trans_predictions, self.relative_scale_predictions, self.relative_quat_predictions, self.relative_direction_prediction]\n self.relative_gt = predictions['relative_gt']\n self.relative_direction_rotation = predictions['trans_dependent_rotation']\n self.relative_direction_rotation_binned = predictions['trans_dependent_rotation_binned']\n self.object_classes = predictions['object_class_gt']\n self.rois = predictions['rois']\n self.index2object_class = predictions['index2object']\n self.amodal_bboxes = predictions['amodal_bboxes']\n self.relative_quat_tensors_angles_gt = predictions['relative_quat_tensors_angles_gt']\n self.codes_gt_quats = predictions['codes_gt_quats']\n self.codes_quat_var = predictions['codes_quat_var']\n\n n = codes_pred_all['shape'].size(0)\n labels_pred = Variable(torch.zeros(n, 1).cuda())\n scores_pred = labels_pred.cpu().data.numpy() * 0 + 1\n bboxes_pred = self.rois.data.cpu().numpy()[:, 1:]\n min_score_eval=np.minimum(0.05, np.max(scores_pred))\n # pos_inds_eval = metrics.nms(\n # np.concatenate((bboxes_pred, scores_pred), axis=1),\n # 0.3, min_score=min_score_eval)\n \n pos_inds_eval=[i for i in range(n)]\n\n self.codes_pred_eval=self.filter_pos(codes_pred_all, pos_inds_eval)\n # pdb.set_trace()\n\n opts=self.opts\n # updates for translation\n if opts.pred_relative:\n for i in range(1):\n if not opts.gt_updates:\n new_trans, self.update_norm=self.update_locations(\n self.codes_pred_eval['trans'].data.cpu(), self.relative_trans_predictions.data.cpu())\n else:\n new_trans, self.update_norm=self.update_locations(\n self.codes_pred_eval['trans'].data.cpu(), self.relative_gt['relative_trans'].data.cpu())\n self.new_trans=new_trans\n if opts.do_updates:\n self.codes_pred_eval['trans']=Variable(new_trans)\n else:\n self.update_norm=torch.mean(self.codes_pred_eval['trans'] * 0, dim=1).data.cpu().numpy().tolist()\n\n if 
opts.pred_relative:\n for i in range(1):\n if not opts.gt_updates:\n new_scale, _ =self.update_locations(\n self.codes_pred_eval['scale'].data.cpu().log(), self.relative_predictions['relative_scale'].data.cpu())\n else:\n new_scale, _ =self.update_locations(\n self.codes_pred_eval['scale'].data.cpu().log(), self.relative_gt['relative_scale'].data.cpu())\n \n new_scale=new_scale.exp()\n self.new_scale=new_scale\n if opts.do_updates:\n self.codes_pred_eval['scale']=Variable(new_scale)\n\n quats_gt_binned = [suncg_parse.quats_to_bininds(q.data.cpu(), self.quat_medoids) for q in self.codes_gt['quat']]\n quats_gt_binned = [Variable(q) for q in quats_gt_binned]\n quats_gt_binned_probs = Variable(self.convert_multiple_bins_to_probabilites(quats_gt_binned, opts.nz_rot, no_noise=1.0)).log()\n \n self.codes_pred_quat_before = Variable(self.codes_pred_eval['quat'].data.clone())\n self.entropy_before_optim = (-1 * self.codes_pred_eval['quat'] * self.codes_pred_eval['quat'].exp()).sum(1).data.cpu().numpy()\n\n if opts.pred_relative and opts.classify_dir and opts.do_updates:\n if opts.gt_updates:\n relative_direction_prediction = self.convert_multiple_bins_to_probabilites(self.relative_direction_rotation_binned, opts.nz_rel_dir).log().numpy()\n self.relative_direction_prediction = Variable(torch.from_numpy(relative_direction_prediction).cuda())\n else:\n relative_direction_prediction = self.relative_direction_prediction.data.cpu().numpy()\n self.relative_direction_prediction_3d = relative_direction_prediction\n\n absolute_locations = self.codes_pred_eval['trans'].data.cpu().numpy()\n # absolute_locations = self.codes_gt[3].data.cpu().numpy()\n absolute_log_probabilites = self.codes_pred_eval['quat'].data.cpu().numpy()\n n_objects = len(absolute_log_probabilites)\n n_absoulte_bins = absolute_log_probabilites.shape[1]\n relative_direction_prediction = relative_direction_prediction.reshape(n_objects, n_objects, -1)\n n_relative_bins = relative_direction_prediction.shape[2]\n bin_scores = np.zeros((n_objects, n_objects, n_absoulte_bins))\n quat_medoids = self.quat_medoids.numpy()\n direction_medoids = self.direction_medoids.numpy()\n new_probability = absolute_log_probabilites\n # lambda_weight = opts.lambda_weight \n # lambda_weight = opts.lambda_weight * 1./np.sqrt(n_objects)\n lambda_weight = opts.lambda_weight * 1./n_objects\n adaptive_weight = np.ones(n_objects)\n # pdb.set_trace()\n\n for nx in range(n_objects):\n src_c = self.index2object_class[self.object_classes.data[nx, 0]]\n ignore_bin_scores = False\n # if src_c == 'table':\n # ignore_bin_scores = True\n # continue\n for mx in range(n_objects):\n if mx == nx:\n continue\n expected_direction = absolute_locations[mx] - absolute_locations[nx] ## make it unit norm\n dist = (1E-5 + np.linalg.norm(expected_direction))\n if dist > 4:\n continue\n\n expected_direction = expected_direction/ (1E-5 + np.linalg.norm(expected_direction))\n expected_direction = expected_direction.reshape(1, -1)\n alignment_scores = []\n indices = []\n entropy = -1*np.sum(np.exp(relative_direction_prediction[nx, mx]) * relative_direction_prediction[nx, mx])\n # if entropy > 2:\n # continue\n # adaptive_weight[nx] += 1\n # pdb.set_trace()\n for abinx in range(n_absoulte_bins):\n prob_bin = absolute_log_probabilites[nx][abinx]\n quaternion_abinx = quat_medoids[abinx]\n rotation = transformations.quaternion_matrix(quaternion_abinx)\n transform = rotation.copy()\n transform[0:3, 3] = np.array(absolute_locations[nx], copy=True)\n \n # translation = 
suncg_parse.trans_transform(absolute_locations[nx])\n # transform = np.matmul(rotation, translation)\n \n relative_direction = direction_medoids\n predicted_direction = suncg_parse.transform_coordinates(transform, relative_direction) -absolute_locations[nx].reshape(1, -1)\n # # log_alignment_score = (1 - np.matmul(expected_direction, predicted_direction.transpose()).squeeze()) #* relative_direction_prediction[nx, mx]\n # alignment_score = np.matmul(expected_direction, predicted_direction.transpose()).squeeze()\n # alignment_score = (alignment_score > 0.95) * alignment_score\n # alignment_score = alignment_score * np.exp(relative_direction_prediction[nx, mx])\n # alignment_score = np.log(np.sum(alignment_score) + 1E-5)\n # pdb.set_trace()\n\n alignment_score = (1 - np.matmul(expected_direction, predicted_direction.transpose()).squeeze())\n index = np.argmin(alignment_score, axis=0)\n alignment_score = np.min(alignment_score, axis=0) + relative_direction_prediction[nx, mx, index]# absolute_log_probabilites[nx][abinx]\n alignment_score = np.min(relative_direction_prediction[nx, mx, index])\n alignment_scores.append(alignment_score)\n # indices.append(index)\n \n\n temp = np.array([metrics.quat_dist(quat_medoids[0], quat_medoids[k]) for k in range(0,24)]).round(2)\n alignment_scores = np.exp(np.array(alignment_scores))\n alignment_scores = np.log(alignment_scores/np.sum(alignment_scores) + 1E-10)\n bin_scores[nx,mx,:] = alignment_scores\n bin_scores = np.sum(bin_scores, axis=1)\n bin_scores = np.exp(bin_scores)\n bin_scores = np.log(1E-10 + bin_scores/np.sum(bin_scores, 1, keepdims=True))\n if ignore_bin_scores == True:\n bin_scores = bin_scores * 0\n \n # pdb.set_trace()\n abs_ent = self.compute_entropy(new_probability).reshape(-1, 1)\n rel_ent = self.compute_entropy(bin_scores).reshape(-1, 1)\n # pdb.set_trace()\n new_probability = 1.0 * new_probability + np.minimum(lambda_weight, 1.0)*(1*bin_scores) + 0.00 * quats_gt_binned_probs.data.cpu().numpy()\n new_probability = torch.from_numpy(new_probability).float()\n new_probability = torch.nn.functional.normalize(new_probability.exp(),1)\n self.codes_pred_eval['quat'] = Variable(new_probability.cuda())\n self.entropy_after_optim = (-1 * self.codes_pred_eval['quat'] * (self.codes_pred_eval['quat'] + 1E-10).log()).sum(1).data.cpu().numpy()\n\n\n\n\n self.rois_pos_eval=self.filter_pos([self.rois], pos_inds_eval)[0] # b x 5, 1:5 is box (x1 y1 x2 y2)\n self.codes_pred_eval['shape']=self.decode_shape(self.codes_pred_eval['shape']) # b x 1 x 32 x 32 x 32\n\n # if self.opts.gmm_rot:\n # self.codes_pred_eval[2] = self.decode_rotation_slerp(self.codes_pred_eval[2], \n # else:\n self.codes_pred_eval['quat']=self.decode_rotation(self.codes_pred_eval['quat']) # b x 4\n self.codes_pred_quat_before = self.decode_rotation(self.codes_pred_quat_before)\n # self.codes_pred_eval[2]=self.decode_rotation_topk(self.codes_pred_eval[2]) # b x 4\n \n # self.codes_pred_eval[2] = suncg_parse.quats_to_bininds(self.codes_gt[2].data.cpu(), self.quat_medoids)\n # self.codes_pred_eval[2] = Variable(suncg_parse.bininds_to_quats(self.codes_pred_eval[2], self.quat_medoids))\n \n self.codes_pred_eval['scale'] # Probably scale b x 3\n self.codes_pred_eval['trans'] # Probably trans b x 3\n\n self.scores_pred_eval=scores_pred[pos_inds_eval, :] * 1.\n if opts.pred_class:\n self.class_pred=self.decode_class(self.class_pred)\n # pdb.set_trace()\n min_score_vis=np.minimum(0.7, np.max(scores_pred))\n # pos_inds_vis = metrics.nms(\n # np.concatenate((bboxes_pred, scores_pred), axis=1),\n # 
0.3, min_score=min_score_vis)\n\n pos_inds_vis=[i for i in range(n)]\n self.codes_pred_vis=self.filter_pos(codes_pred_all, pos_inds_vis)\n self.rois_pos_vis=self.filter_pos([self.rois], pos_inds_vis)[0]\n self.codes_pred_vis['shape']=self.decode_shape(self.codes_pred_vis['shape'])\n # self.codes_pred_vis[2]=self.decode_rotation(self.codes_pred_vis[2])\n self.codes_pred_vis['quat']=self.codes_pred_eval['quat']\n\n # self.layout_pred = self.layout_model.forward(self.input_imgs_layout)\n\n def clamp_to_image(self, amodal_bboxes, img_size):\n return torch.stack([torch.clamp(amodal_bboxes[:,0], 0, img_size[1]),\n torch.clamp(amodal_bboxes[:,1], 0, img_size[0]),\n torch.clamp(amodal_bboxes[:,2], 0, img_size[1]),\n torch.clamp(amodal_bboxes[:,3], 0, img_size[0])], dim=1)\n\n def compute_object_presence_parameters(self, amodal_bboxes, roi_bboxes, img_size):\n ## Compute % visible in the image\n ## % Overlap with other objects not visble\n ammodal_bboxes_clip_to_image = self.clamp_to_image(amodal_bboxes, img_size)\n size_box = amodal_bboxes[:,2:4] - amodal_bboxes[:,0:2]\n area_box = size_box[:,0]*size_box[:,1]\n\n size_image_box = ammodal_bboxes_clip_to_image[:,2:4] - ammodal_bboxes_clip_to_image[:,0:2]\n area_image_box = size_image_box[:,0]*size_image_box[:,1]\n\n size_roi_box = roi_bboxes[:,2:4] - roi_bboxes[:,0:2]\n area_roi_box = size_roi_box[:,0]*size_roi_box[:,1]\n\n return area_image_box/(area_box + 1), area_roi_box/(area_image_box + 1)\n\n def evaluate(self):\n # rois as numpy array\n # Get Predictions.\n # pdb.set_trace()\n opts = self.opts\n shapes = self.codes_pred_eval['shape'] \n scales = self.codes_pred_eval['scale']\n rots = self.codes_pred_eval['quat']\n trans = self.codes_pred_eval['trans']\n rots_before = self.codes_pred_quat_before\n trans=trans\n scores=self.scores_pred_eval\n boxes=self.rois_pos_eval.cpu().data.numpy()[:, 1:]\n # Get Ground Truth.\n # pdb.set_trace()\n gt_shapes = self.codes_gt['shape']\n gt_scales = self.codes_gt['scale']\n gt_rots = self.codes_gt['quat']\n gt_trans = self.codes_gt['trans']\n\n\n gt_boxes=self.rois.cpu().data.numpy()[:, 1:]\n iou_box=bbox_utils.bbox_overlaps(boxes.astype(np.float), gt_boxes.astype(np.float))\n trans_, gt_trans_=trans.cpu().data.numpy(), gt_trans.cpu().data.numpy()\n err_trans=np.linalg.norm(np.expand_dims(trans_, 1) - np.expand_dims(gt_trans_, 0), axis=2)\n err_pwd=np.zeros([len(err_trans)])\n\n\n err_rel_quat = (0*err_pwd).tolist()\n acc_rel = (0*err_pwd).tolist()\n # object_presence, object_visibility = self.compute_object_presence_parameters(self.amodal_bboxes, self.rois[:, 1:].data.cpu(),\n # [opts.img_height_fine, opts.img_width_fine])\n\n n_objects=len(gt_rots)\n\n acc_rel_dir_conditions = []\n acc_rel_dir = []\n err_rel_dir = []\n\n if opts.pred_relative:\n indices=[i + i * n_objects for i in range(n_objects)]\n\n if self.opts.classify_dir:\n relative_direction_predictions_classes = self.get_class_indices(self.relative_direction_prediction)\n entropy = -1*(self.relative_direction_prediction * self.relative_direction_prediction.exp()).sum(1).data.cpu().numpy()\n relative_direction_prediction = suncg_parse.bininds_to_directions(relative_direction_predictions_classes, self.direction_medoids)\n else:\n relative_direction_prediction = self.relative_direction_prediction.data.cpu()\n\n # pdb.set_trace()\n # pdb.set_trace()\n for i, (pred_dir, gt_dirs) in enumerate(zip(relative_direction_prediction, self.relative_direction_rotation)):\n # for i, (pred_dir, gt_dirs) in enumerate(zip(self.relative_direction_rotation, 
self.relative_direction_rotation)):\n if i in indices:\n continue\n\n src_i = i //n_objects\n src_c = self.index2object_class[self.object_classes.data[src_i, 0]]\n # if src_c != 'desk':\n # continue\n\n min_err = 180\n for gt_dir in gt_dirs:\n min_err = min(min_err, metrics.direction_dist(pred_dir.numpy(), gt_dir.numpy()))\n err_rel_dir.append(min_err)\n state = -1\n err_angle_iter = iter(err_rel_dir)\n if opts.classify_dir:\n for i, (pred, gt_bins, pred_dir, gt_dirs) in enumerate(zip(relative_direction_predictions_classes, self.relative_direction_rotation_binned,\n relative_direction_prediction, self.relative_direction_rotation)):\n if i in indices:\n continue\n if pred in gt_bins.data.cpu():\n acc_rel_dir.append(1)\n state = 1\n else:\n acc_rel_dir.append(0)\n state = 0\n src_i = i//n_objects\n trj_i = i % n_objects\n src_c = self.index2object_class[self.object_classes.data[src_i, 0]]\n trj_c = self.index2object_class[self.object_classes.data[trj_i, 0]]\n t = (\"{}\".format(self.image_names[0]), src_i, trj_i, pred, gt_bins.data.cpu().numpy(),\n np.linalg.norm(gt_trans_[src_i]- gt_trans_[trj_i]), state, \n next(err_angle_iter),\n src_c, trj_c,\n 0, 0,\n 0, 0, entropy[i])\n acc_rel_dir_conditions.append(t)\n else:\n acc_rel_dir.append(0)\n\n\n scales_, gt_scales_=scales.cpu().data.numpy(), gt_scales.cpu().data.numpy()\n err_scales=np.mean(np.abs(np.expand_dims(np.log(scales_), 1) - np.expand_dims(np.log(gt_scales_), 0)), axis=2)\n err_scales /= np.log(2.0)\n\n gt_quats = [t.data.cpu() for t in self.codes_gt_quats]\n\n\n ndt, ngt=err_scales.shape\n err_shapes=err_scales * 0.\n err_rots=err_scales * 0.\n err_rots_before = err_scales * 0\n # pdb.set_trace()\n for i in range(ndt):\n for j in range(ngt):\n err_shapes[i, j]=metrics.volume_iou(shapes[i, 0].data, gt_shapes[\n j, 0].data, thresh=self.opts.voxel_eval_thresh)\n if len(rots[i]) == 4:\n # err_rots[i, j]=metrics.quat_dist(rots[i].data.cpu(), gt_rots[j].data.cpu())\n q_errs = []\n for quat in gt_quats[j]:\n q_errs.append(metrics.quat_dist(rots[i].data.cpu(), quat))\n err_rots[i, j] = min(q_errs)\n else:\n m1 = metrics.quat_dist(rots[i][0].data.cpu(), gt_rots[j].data.cpu())\n m2 = metrics.quat_dist(rots[i][1].data.cpu(), gt_rots[j].data.cpu())\n err_rots[i, j] = min(m1, m2)\n\n for i in range(ndt):\n for j in range(ngt):\n err_shapes[i, j]=metrics.volume_iou(shapes[i, 0].data, gt_shapes[\n j, 0].data, thresh=self.opts.voxel_eval_thresh)\n if len(rots_before[i]) == 4:\n # err_rots[i, j]=metrics.quat_dist(rots[i].data.cpu(), gt_rots[j].data.cpu())\n q_errs = []\n for quat in gt_quats[j]:\n q_errs.append(metrics.quat_dist(rots_before[i].data.cpu(), quat))\n err_rots_before[i, j] = min(q_errs)\n else:\n m1 = metrics.quat_dist(rots_before[i][0].data.cpu(), gt_rots[j].data.cpu())\n m2 = metrics.quat_dist(rots_before[i][1].data.cpu(), gt_rots[j].data.cpu())\n err_rots_before[i, j] = min(m1, m2)\n\n err_rots=np.diag(err_rots).tolist()\n acc_rots = [1 if err < 30 else 0 for err in err_rots]\n err_rots_before = np.diag(err_rots_before).tolist()\n acc_rots_before = [1 if err < 30 else 0 for err in err_rots_before]\n err_trans=np.diag(err_trans).tolist()\n err_scales=np.diag(err_scales).tolist()\n err_pwd=err_pwd.tolist()\n err_shapes = np.diag(err_shapes).tolist()\n\n image_name = \"{}\".format(self.image_names[0])\n absolute_rot_conditions = []\n # pdb.set_trace()\n if self.opts.var_gmm_rot:\n self.codes_quat_var = np.sqrt(np.exp(self.codes_quat_var))*180/np.pi\n else:\n self.codes_quat_var = np.zeros([len(err_rots), self.opts.nz_rot])\n for ox in 
range(len(err_rots)):\n bf_class = suncg_parse.quats_to_bininds(rots_before[ox].data.unsqueeze(0), self.quat_medoids)[0]\n af_class = suncg_parse.quats_to_bininds(rots[ox].data.unsqueeze(0), self.quat_medoids)[0]\n t = (image_name, ox, self.index2object_class[self.object_classes.data[ox, 0]], err_rots_before[ox], err_rots[ox],\n self.entropy_before_optim[ox], self.entropy_after_optim[ox], bf_class, af_class,\n metrics.quat_dist(rots_before[ox].data, rots[ox].data), self.codes_quat_var[ox][bf_class])\n # pdb.set_trace()\n absolute_rot_conditions.append(t)\n\n\n # absolute_rot_conditions = [(err_b, err, entp_be, entp_af, metrics.quat_dist(rot_b.data, rot_af.data)) for err_b, err, entp_be, entp_af, rot_b, rot_af in zip(err_rots_before,\n # err_rots, self.entropy_before_optim, self.entropy_after_optim, rots_before, rots)]\n\n # for i in range(len(err_rots_before)):\n # if self.entropy_before_optim[i] < self.entropy_after_optim[i] - 1.0:\n # # pdb.set_trace()\n # err_rots[i] = err_rots_before[i]\n\n\n\n stats={'trans': err_trans, 'scales': err_scales,'shape': err_shapes, 'rot': err_rots, 'rot_b' : err_rots_before, 'acc_rots' : acc_rots, 'acc_rots_bef' : acc_rots_before,\n 'pwd': err_pwd, 'trans_updates': self.update_norm, 'acc_rot' : acc_rots, 'acc_rot_before' : acc_rots_before,\n # 'pwr': err_rel_quat, 'acc_rel_quat' : acc_rel_quat, \n 'acc_rel_dir' : acc_rel_dir, 'rel_dir': err_rel_dir\n }\n # print(stats)\n # pdb.set_trace()\n\n if opts.pred_class:\n correct=torch.sum(self.class_pred == self.object_classes.squeeze(1).data.cpu())\n total=len(self.class_pred)\n stats['correct']=correct\n stats['total']=total\n else:\n stats['correct']=0\n stats['total']=0\n \n if len(err_trans) == 1:\n stats = {}\n return stats, acc_rel_dir_conditions, absolute_rot_conditions\n\n def save_current_visuals(self, image_name):\n imgs_dir=osp.join(self.opts.results_quality_dir, '{}'.format(image_name))\n img_file = osp.join(imgs_dir, 'c_gt_objects_cam_view.png')\n if osp.exists(imgs_dir) and osp.exists(img_file) and False:\n return\n else:\n visuals=self.get_current_visuals()\n if not os.path.exists(imgs_dir) :\n os.makedirs(imgs_dir)\n for k in visuals:\n img_path=osp.join(imgs_dir, k + '.png')\n scipy.misc.imsave(img_path, visuals[k])\n\n def save_current_stats(self, bench, image_name):\n imgs_dir=osp.join(self.opts.results_quality_dir, '{}'.format(image_name))\n json_file=os.path.join(imgs_dir, 'bench_iter_{}.json'.format(0))\n # print(json_file)\n # if house_name == 'd49bb0b4b52cceffbe6086dfa1976e51':\n # pdb.set_trace()\n with open(json_file, 'w') as f:\n json.dump({'bench': bench}, f)\n\n def test_draw(self):\n opts=self.opts\n image_names=[]\n index_filename=opts.index_file\n if index_filename is not None:\n with open(index_filename) as f:\n for line in f:\n line=line.strip()\n image_names.append(line)\n\n # pdb.set_trace()\n # read the files for which you want to create visualizations?\n indices = []\n if len(image_names) == 0:\n rng_state = np.random.RandomState(0)\n indices = rng_state.choice(len(self.dataloader), 100)\n\n for i, batch in enumerate(self.dataloader):\n self.set_input(batch)\n self.vis_iter=i\n # print(i)\n if self.invalid_batch:\n continue\n image_name=batch['image_name'][0]\n example_id='{}'.format(image_name)\n if example_id in image_names or (len(image_names) == 0 and i in indices) :\n self.predict()\n bench_image_stats,_,_=self.evaluate()\n self.save_current_visuals(image_name)\n self.save_current_stats(bench_image_stats, image_name)\n print(\"Generating {}\".format(i))\n\n\n\n def 
test(self):\n # Choose 30 random examples and save it.\n opts=self.opts\n if not opts.preload_stats:\n invalid_rois=0\n bench_stats=[]\n acc_rel_conditions_all = []\n head = ['house_name_view_id', 'src_index', 'trj_index', 'PC', 'GT', 'D', 'Acc', 'Err', 'SC', 'TC', 'VS', 'VT', 'PS', 'PT', 'Entropy', 'Var']\n acc_rel_conditions_all.append(head)\n acc_rel_dir_conditions_all = [head]\n absolute_rot_conditions_all = [['house_name_view_id', 'obj_index', 'obj_class', 'err_before', 'err', 'entropy_before', 'entropy_after', 'rot_b', 'rot_af', 'diff_bw_bf_af']]\n\n # codes are (shapes, scales, quats, trans)\n n_iter=len(self.dataloader)\n for i, batch in enumerate(self.dataloader):\n if i % 100 == 0:\n print('{}/{} evaluation iterations.'.format(i, n_iter))\n if opts.max_eval_iter > 0 and (i >= opts.max_eval_iter):\n break\n self.set_input(batch)\n if not self.invalid_batch:\n self.predict()\n # pdb.seto_trace()\n image_name = batch['image_name'][0]\n bench_image_stats, acc_rel_dir_conditions, absolute_rot_conditions =self.evaluate()\n acc_rel_dir_conditions_all.extend(acc_rel_dir_conditions)\n absolute_rot_conditions_all.extend(absolute_rot_conditions)\n json_file=osp.join(opts.results_eval_dir, 'eval_result_{}.json'.format(image_name))\n bench_image_stats['image_name']=batch['image_name'][0]\n # pdb.set_trace()\n with open(json_file, 'w') as f:\n json.dump({'bench': bench_image_stats}, f)\n\n bench_stats.append(bench_image_stats)\n\n # if opts.save_visuals and (i % opts.visuals_freq == 0):\n # self.save_current_visuals()\n else:\n if self.invalid_rois is not None:\n print(\"Total rois {}\".format(self.invalid_rois.numel() / 5))\n invalid_rois += 1\n # if i > 10:\n # break\n # break\n\n print(\"% of RoI invalid {}\".format(invalid_rois * 100.0 / n_iter))\n\n\n # Accumalate stats and print\n acc_stats={'trans': [], 'scales': [], 'shape' : [], 'rot_b': [], 'rot': [], 'trans_updates': [],\n # 'pwr': [], 'acc_rel_quat' : [], \n 'acc_rel_dir' : [], 'rel_dir' : [], 'acc_rot' : [], 'acc_rot_before' : []}\n class_stats={'correct': [], 'total': []}\n for bench in bench_stats:\n for key in acc_stats.keys():\n if key in bench:\n acc_stats[key].extend(bench[key])\n for key in class_stats.keys():\n if key in bench:\n class_stats[key].append(bench[key])\n\n # acc_threshold = {'shape' : 0.25 , 'trans' : 1, 'rot_b' : 30, 'rot' : 30, 'scales':0.5}\n acc_threshold = {'shape' : 0.25 , 'trans' : 0.5, 'rot_b' : 30, 'rot' : 30, 'scales':0.2}\n for key, thres in acc_threshold.items():\n acc_stats[\"{}_acc\".format(key)] = [1 if v < thres else 0 for v in acc_stats[key]]\n\n # pdb.set_trace()\n json_file=os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.json'.format(opts.id, opts.eval_set, 0))\n\n print('Writing results to file: {:s}'.format(json_file))\n with open(json_file, 'w') as f:\n json.dump(acc_stats, f)\n else:\n json_file=os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.json'.format(opts.id, opts.eval_set, 0))\n with open(json_file) as f:\n acc_stats=json.load(f)\n\n # Print mean error and median error\n metrics={'mean': np.mean, 'median': np.median}\n criterias={'trans', 'scales', 'rot','rot_b', 'trans_updates', 'shape',\n # 'pwr', 'acc_rel_quat',\n 'acc_rel_dir', 'rel_dir', 'acc_rot', 'acc_rot_before',\n 'trans_acc', 'rot_b_acc', 'rot_acc', 'scales_acc', 'shape_acc'}\n\n for key in criterias:\n for mkey in metrics.keys():\n print('{} {} : {:0.3f}'.format(mkey, key, metrics[mkey](np.array(acc_stats[key]))))\n\n for key in acc_stats.keys():\n acc_stats[key]=np.array(acc_stats[key])\n\n # 
keys=['trans', 'scales', 'rot', 'pwd', 'trans_updates', 'pwr']\n key_clip={'shape' : 1.0, 'trans': 3.0, 'pwd': 5.0, 'scales': 1.5, 'rot_b': 180, 'rot': 180,'trans_updates': 4, 'pwr': 180 , 'rel_dir': 180}\n for key in criterias:\n err=acc_stats[key]\n if 'acc' in key:\n clip_max = 2\n continue\n else:\n clip_max=key_clip[key]\n values, base=np.histogram(np.clip(np.array(err), 0, clip_max), 40)\n cumulative=np.cumsum(values)\n cumulative=cumulative / len(err)\n plt.plot(cumulative, base[:-1], c='blue')\n plt.plot([0.0, 1.0], [np.mean(err), np.mean(err)], c='red')\n plt.title('Error {} vs data-fraction'.format(key))\n plt.savefig(os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.png'.format(opts.id, opts.eval_set, key)))\n plt.close()\n\n with open(os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.pkl'.format(opts.id, opts.eval_set, key)) , 'wb') as f:\n pickle.dump({'err' : acc_stats[key], 'freq_values' : cumulative, 'bin_values': base[:-1]}, f)\n\n\n if self.opts.pred_class:\n correct=sum(class_stats['correct'])\n total=sum(class_stats['total'])\n print('{}, {}/{} {}'.format('class accuracy', correct, total, correct * 1.0 / total))\n\n\n if opts.log_csv is not None:\n with open(\"{}_rel_dir.csv\".format(opts.log_csv), 'w') as f:\n for acc_cond in acc_rel_dir_conditions_all:\n for val in list(acc_cond):\n if type(val) == np.ndarray:\n t = list(set([str(x) for x in val]))\n t.sort()\n f.write('{},'.format(';'.join(t)))\n else:\n if type(val) == str:\n f.write('{},'.format(val))\n else:\n f.write('{},'.format(str(np.round(val, 3))))\n f.write('\\n')\n\n if opts.log_csv is not None:\n with open(\"{}_abs_rot.csv\".format(opts.log_csv), 'w') as f:\n for entropy_cond in absolute_rot_conditions_all:\n for val in list(entropy_cond):\n if type(val) == np.ndarray:\n t = list(set([str(x) for x in val]))\n t.sort()\n f.write('{},'.format(';'.join(t)))\n else:\n if type(val) == str:\n f.write('{},'.format(val))\n else:\n f.write('{},'.format(str(np.round(val, 3))))\n f.write('\\n')\n\n\ndef main(_):\n FLAGS.suncg_dl_out_codes=True\n FLAGS.suncg_dl_out_fine_img=True\n FLAGS.suncg_dl_out_test_proposals=False\n FLAGS.suncg_dl_out_voxels=False\n FLAGS.suncg_dl_out_layout=False\n FLAGS.suncg_dl_out_depth=False\n # FLAGS.n_data_workers=4\n FLAGS.max_views_per_house=2\n \n\n FLAGS.batch_size=1\n assert(FLAGS.batch_size == 1)\n\n if FLAGS.results_name is None:\n FLAGS.results_name=FLAGS.name\n\n FLAGS.results_vis_dir=osp.join(FLAGS.results_vis_dir, 'box3d_base', FLAGS.eval_set, FLAGS.results_name)\n FLAGS.results_quality_dir=osp.join(FLAGS.results_quality_dir, 'box3d_base', FLAGS.eval_set, FLAGS.results_name)\n FLAGS.results_eval_dir=osp.join(FLAGS.results_eval_dir, 'box3d_base', FLAGS.eval_set, FLAGS.results_name)\n FLAGS.rendering_dir = osp.join(FLAGS.rendering_dir, FLAGS.results_name)\n if not os.path.exists(FLAGS.results_eval_dir):\n os.makedirs(FLAGS.results_eval_dir)\n if not os.path.exists(FLAGS.results_vis_dir):\n os.makedirs(FLAGS.results_vis_dir)\n\n torch.manual_seed(0)\n np.random.seed(0)\n random.seed(0)\n\n if not FLAGS.classify_rot:\n FLAGS.nz_rot=4\n\n\n if not FLAGS.classify_dir:\n FLAGS.nz_rel_dir=3\n\n tester=DWRTester(FLAGS)\n tester.init_testing()\n if not FLAGS.draw_vis:\n tester.test()\n else:\n tester.test_draw()\n\n # pred_clases = torch.cat(tester.stored_quat_relative_pred_classes).numpy()\n # gt_clases = torch.cat(tester.stored_quat_relative_gt_classes).numpy()\n\n # with open(osp.join(FLAGS.results_eval_dir, 'pred_relative_classes.npy'),'w') as f:\n # np.save(f, 
pred_clases)\n # with open(osp.join(FLAGS.results_eval_dir, 'gt_relative_classes.npy'),'w') as f:\n # np.save(f, gt_clases)\n # # pdb.set_trace()\n\n\nif __name__ == '__main__':\n app.run(main)\n" ]
[ [ "numpy.vstack", "numpy.sum", "numpy.ones", "numpy.cumsum", "numpy.divide", "numpy.zeros", "numpy.argsort", "numpy.hstack", "numpy.where" ], [ "numpy.ones", "numpy.sum", "numpy.diag", "numpy.random.seed", "numpy.random.RandomState", "numpy.log", "matplotlib.pyplot.plot", "torch.cat", "numpy.argmin", "torch.autograd.Variable", "torch.norm", "torch.from_numpy", "numpy.expand_dims", "torch.index_select", "matplotlib.use", "numpy.round", "torch.Tensor", "numpy.mean", "numpy.minimum", "torch.mean", "torch.ones", "numpy.zeros", "torch.nn.functional.normalize", "torch.manual_seed", "torch.zeros", "numpy.max", "numpy.min", "matplotlib.pyplot.close", "numpy.linalg.norm", "torch.nn.functional.log_softmax", "numpy.cumsum", "numpy.exp", "numpy.linalg.lstsq", "numpy.array", "numpy.concatenate", "numpy.random.randint", "torch.clamp" ] ]
storopoli/Machine-Learning-Probalistic
[ "fb939b92a61f7d3a7e6c28d0b14f58d99a0b07f1" ]
[ "pyprobml-master/examples/betaCredibleInt.py" ]
[ "from scipy.stats import beta\nimport numpy as np\n\nS = 47\nN = 100 \na = S+1\nb = (N-S)+1 \nalpha = 0.05;\n\nCI1 = beta.interval(1-alpha, a, b)\n\nl = beta.ppf(alpha/2, a, b)\nu = beta.ppf(1-alpha/2, a, b)\nCI2 = (l,u)\n\nsamples = beta.rvs(a, b, size=1000)\nsamples = np.sort(samples)\nCI3 = np.percentile(samples, 100*np.array([alpha/2, 1-alpha/2])) \n\nprint(CI1)\nprint(CI2)\nprint(CI3)\n" ]
[ [ "scipy.stats.beta.interval", "scipy.stats.beta.rvs", "scipy.stats.beta.ppf", "numpy.sort", "numpy.array" ] ]
YasuShimizu/1D-Free-Water-Surface
[ "38a86e167b43ebe46187aa6e781a93414136235f" ]
[ "initial.py" ]
[ "import numpy as np\r\n\r\ndef eta_init(eta,eta0,eta_up,eta_up0,nx,dx, \\\r\n slope,xl,xb1,xb2,xb3,dbed):\r\n zb0=xl*slope\r\n for i in np.arange(0,nx+2):\r\n xx=dx*float(i)\r\n eta_up[i]=zb0-xx*slope\r\n eta_up0[i]=eta_up[i]\r\n# print(i,nx,eta_up[i])\r\n if xx>xb1 and xx<xb2:\r\n ss=xx-xb1\r\n deta=dbed*ss/(xb2-xb1)\r\n eta_up[i]=eta_up[i]+deta\r\n elif xx>=xb2 and xx<xb3:\r\n ss=xb3-xx\r\n deta=dbed*ss/(xb3-xb2)\r\n eta_up[i]=eta_up[i]+deta\r\n\r\n for i in np.arange(1,nx+2):\r\n eta[i]=(eta_up[i]+eta_up[i-1])*.5\r\n eta0[i]=(eta_up0[i]+eta_up0[i-1])*.5\r\n eta[0]=2.*eta[1]-eta[2]\r\n eta0[0]=2.*eta0[1]-eta0[2]\r\n return eta,eta0,eta_up,eta_up0\r\n\r\ndef eta_init_2(eta,eta0,eta_up,eta_up0,nx,dx, \\\r\n xl,x_slope,slope1,slope2):\r\n zb0=x_slope*slope1+(xl-x_slope)*slope2\r\n zb1=zb0-x_slope*slope1\r\n for i in np.arange(0,nx+2):\r\n xx=dx*float(i)\r\n if xx <= x_slope:\r\n eta_up[i]=zb0-xx*slope1\r\n else:\r\n eta_up[i]=zb1-(xx-x_slope)*slope2 \r\n eta_up0[i]=eta_up[i]\r\n\r\n for i in np.arange(1,nx+2):\r\n eta[i]=(eta_up[i]+eta_up[i-1])*.5\r\n eta0[i]=(eta_up0[i]+eta_up0[i-1])*.5\r\n eta[0]=2.*eta[1]-eta[2]\r\n eta0[0]=2.*eta0[1]-eta0[2]\r\n return eta,eta0,eta_up,eta_up0\r\n\r\ndef h_init(eta,eta0,eta_up,eta_up0,h,hs,h_up,hs_up, \\\r\n hs_upstm,hs_dwstm,nx,dx,xl):\r\n xhalf=xl*.95\r\n for i in np.arange(0,nx+1):\r\n xlen=i*dx\r\n if xlen<xhalf:\r\n hs_up[i]=hs_upstm\r\n else:\r\n hs_up[i]=hs_dwstm\r\n\r\n# hs_up[i]=hs_upstm+(hs_dwstm-hs_upstm)*xlen/xl\r\n h_up[i]=eta_up0[i]+hs_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=(hs_up[i]+hs_up[i-1])*.5\r\n h[i]=eta0[i]+hs[i]\r\n\r\n for i in np.arange(0,nx+1):\r\n hs_up[i]=h_up[i]-eta_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=h[i]-eta[i]\r\n\r\n hs[0]=hs_upstm; h[0]=eta[0]+hs_upstm\r\n hs[nx+1]=hs_dwstm; h[nx+1]=eta[nx+1]+hs_dwstm\r\n\r\n return h,hs,h_up,hs_up\r\n\r\ndef h_init_2(eta,eta0,eta_up,eta_up0,h,hs,h_up,hs_up, \\\r\n hs_upstm,hs_dwstm,nx,dx,xl,x_slope):\r\n\r\n for i in np.arange(0,nx+1):\r\n xlen=i*dx\r\n if xlen<x_slope:\r\n hs_up[i]=hs_upstm\r\n else:\r\n hs_up[i]=hs_dwstm\r\n\r\n# hs_up[i]=hs_upstm+(hs_dwstm-hs_upstm)*xlen/xl\r\n h_up[i]=eta_up0[i]+hs_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=(hs_up[i]+hs_up[i-1])*.5\r\n h[i]=eta0[i]+hs[i]\r\n\r\n for i in np.arange(0,nx+1):\r\n hs_up[i]=h_up[i]-eta_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=h[i]-eta[i]\r\n\r\n hs[0]=hs_upstm; h[0]=eta[0]+hs_upstm\r\n hs[nx+1]=hs_dwstm; h[nx+1]=eta[nx+1]+hs_dwstm\r\n\r\n return h,hs,h_up,hs_up\r\n\r\ndef u_init(g,qp,u,hs_up,fr,nx):\r\n for i in np.arange(0,nx+1):\r\n u[i]=qp/hs_up[i]\r\n fr[i]=u[i]/np.sqrt(g*hs_up[i])\r\n# print(i,hs_up[i],u[i],fr[i])\r\n\r\n return u,fr\r\n\r\ndef x_cell_init(x_cell,x,dx,nx):\r\n for i in np.arange(1,nx+1):\r\n x_cell[i]=(x[i]+x[i-1])*.5\r\n x_cell[0]=x_cell[1]-dx\r\n x_cell[nx+1]=x_cell[nx]+dx\r\n\r\n return x_cell\r\n\r\ndef h0_cal(eta,eta_up,nx,dx,qp,snm,h0_up):\r\n for i in np.arange(1,nx):\r\n slope=(eta[i]-eta[i+1])/dx\r\n if slope<=0. :\r\n h0=0.\r\n else:\r\n h0=(qp*snm/np.sqrt(slope))**(.6)\r\n h0_up[i]=eta_up[i]+h0\r\n if i==1:\r\n h0_up[0]=eta_up[0]+h0\r\n elif i==nx-1:\r\n h0_up[nx]=eta_up[nx]+h0\r\n\r\n return h0_up\r\n" ]
[ [ "numpy.arange", "numpy.sqrt" ] ]
dennis-l/tolteca
[ "1dffaffb585eb7027e26b34ae01e8632bef134cb" ]
[ "tolteca/simu/toltec/models.py" ]
[ "#!/usr/bin/env python\n\n\nfrom gwcs import coordinate_frames as cf\nimport astropy.units as u\nfrom astropy.time import Time\nfrom astropy.modeling import models, Parameter, Model\nfrom astropy.coordinates import SkyCoord, Angle\nfrom astropy.table import Table\nfrom astropy.cosmology import default_cosmology\nfrom astropy import constants as const\nfrom astropy.utils.decorators import classproperty\nfrom scipy.interpolate import interp1d\nfrom dataclasses import dataclass, field\nimport numpy as np\n\nfrom tollan.utils.dataclass_schema import add_schema\nfrom tollan.utils.log import timeit, get_logger\nfrom tollan.utils.fmt import pformat_yaml\nfrom kidsproc.kidsmodel import _Model as ComplexModel\nfrom contextlib import contextmanager, ExitStack\n\nfrom ...utils.common_schema import PhysicalTypeSchema\nfrom ...utils import get_pkg_data_path\nfrom .toltec_info import toltec_info\nfrom .lmt import get_lmt_atm_models\n\nfrom ..base import ProjModel, LabelFrame\nfrom ..sources.base import PowerLoadingModel\nfrom ..mapping.utils import rotation_matrix_2d, _get_skyoffset_frame\n\n\n__all__ = [\n 'pa_from_coords',\n 'ToltecArrayProjModel', 'ToltecSkyProjModel',\n 'KidsReadoutNoiseModel',\n 'ToltecArrayPowerLoadingModel',\n 'ToltecPowerLoadingModel'\n ]\n\n\ndef pa_from_coords(observer, coords_altaz, coords_icrs):\n \"\"\"Calculate parallactic angle at coords.\n\n \"\"\"\n # TODO: revisit this\n # http://star-www.st-and.ac.uk/~fv/webnotes/chapter7.htm\n # note that their are issues with these values\n # where cosha^2 + sinha^2 is off from 1. by 0.1%. This\n # gives about 0.7 deg of deviation from the direct\n # calculation using LST from time_obs\n cosha = (\n np.sin(coords_altaz.alt.radian)\n - np.sin(coords_icrs.dec.radian)\n * np.sin(observer.location.lat.radian)) / (\n np.cos(coords_icrs.dec.radian)\n * np.cos(observer.location.lat.radian)\n )\n sinha = (\n -np.sin(coords_altaz.az.radian)\n * np.cos(coords_altaz.alt.radian)\n / np.cos(coords_icrs.dec.radian)\n )\n # print(sinha ** 2 + cosha ** 2 - 1)\n parallactic_angle = Angle(np.arctan2(\n sinha,\n (\n np.tan(observer.location.lat.radian)\n * np.cos(coords_icrs.dec.radian)\n - np.sin(coords_icrs.dec.radian)\n * cosha)\n ) << u.rad)\n return parallactic_angle\n\n\nclass ToltecArrayProjModel(ProjModel):\n \"\"\"\n A model to transform TolTEC detector locations and orientations on the\n each array to a common TolTEC instrument frame defined in offset angle\n unit, with the extent of arrays normalized to the size of the on-sky\n field of view.\n\n The TolTEC frame is attached to the TolTEC instrument body and describes\n the projected positions and orientations of all detectors on the sky. 
The\n origin of the TolTEC frame is fixed at the telescope bore sight.\n\n The two axes az_offset and alt_offset is aligned with the telescope\n Az/Alt at altitude of 0 deg, and they rotate by the value of the altitude\n following the left hand rule.\n\n The orientations of detectors also get projected to the TolTEC frame,\n where the P.A = 0 is set to be the +alt_offset and the sign convention\n follows the left hand rule.\n \"\"\"\n\n input_frame = cf.CompositeFrame([\n cf.Frame2D(\n name='det_pos',\n axes_names=(\"x\", \"y\"),\n unit=(u.um, u.um),\n ),\n LabelFrame(\n axes_names=['array', 'fg'], axes_order=(2, 3),\n name='det_prop'),\n ], name='focal_plane')\n output_frame = cf.CompositeFrame([\n cf.Frame2D(\n name='sky_offset',\n axes_names=(\"az_offset\", \"alt_offset\"),\n unit=(u.deg, u.deg)),\n cf.CoordinateFrame(\n naxes=1,\n axes_type='SPATIAL',\n axes_order=(2, ),\n unit=(u.deg, ),\n axes_names=(\"pa\", ),\n name='det_pa'),\n ], name='toltec')\n n_inputs = input_frame.naxes\n n_outputs = output_frame.naxes\n\n _array_index_to_mounting_angle = {\n toltec_info[array_name]['index']:\n toltec_info[array_name]['array_mounting_angle']\n for array_name in toltec_info['array_names']\n }\n\n _fg_to_det_pa = {\n toltec_info[fg_name]['index']:\n toltec_info[fg_name]['det_pa']\n for fg_name in toltec_info['fg_names']\n }\n\n _plate_scale = toltec_info['fov_diameter'] \\\n / toltec_info['array_physical_diameter']\n # this is need to make the affine transform work correctly\n _plate_unit = toltec_info['array_physical_diameter'].unit\n\n _mat_refl = np.array([[1, 0], [0, -1]], dtype='d')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # build the models for transforming x and y\n m_pos = dict()\n m_pa = dict()\n for ai, rot in self._array_index_to_mounting_angle.items():\n m_pos[ai] = models.AffineTransformation2D(\n (\n rotation_matrix_2d(rot.to_value(u.rad)) @ self._mat_refl\n ) << self._plate_unit,\n translation=(0., 0.) 
<< self._plate_unit\n ) | (\n models.Multiply(self._plate_scale) &\n models.Multiply(self._plate_scale)\n )\n for fg, pa in self._fg_to_det_pa.items():\n m_pa[(ai, fg)] = models.Const1D(pa + rot)\n m_proj = dict()\n for k, m in m_pa.items():\n # build the full proj model\n # k[0] is array index\n m_proj[k] = models.Mapping((0, 1, 0)) | m_pos[k[0]] & m_pa[k]\n self._m_pos = m_pos\n self._m_pa = m_pa\n self._m_proj = m_proj\n\n def evaluate(self, x, y, array, fg):\n # note that both array and fg are coerced to double and\n # we need to make them int before creating the masks\n array = array.astype(int)\n fg = fg.astype(int)\n # loop over proj models and populate result\n result = np.empty((self.n_outputs, ) + x.shape, dtype='d') << u.deg\n # this is used to check if all values are covered\n not_computed = np.ones(x.shape, dtype=bool)\n for k, m in self._m_proj.items():\n mask = (array == k[0]) & (fg == k[1])\n result[0, mask], result[1, mask], result[2, mask] = m(\n x[mask], y[mask])\n not_computed[mask] = False\n if np.sum(not_computed) > 0:\n invalid = np.unique(\n np.vstack([\n array[not_computed],\n fg[not_computed]]),\n axis=1\n ).T\n raise ValueError(\n f\"Invalid (array, fg) in input: {invalid}\")\n # apply the transformation for each unit\n return result\n\n\nclass ToltecSkyProjModel(ProjModel):\n \"\"\"\n A model to transform TolTEC detector positions and orientations\n expressed in offset angular unit in the TolTEC frame to\n absolute world coordinates for given telescope bore sight target\n and time of obs.\n\n The output coordinate frame is a generic sky lon/lat frame which\n can represent any of the valid celestial coordinate frames supported,\n by specifying the ``evaluate_frame`` keyword argument.\n \"\"\"\n\n logger = get_logger()\n\n def __init__(\n self,\n origin_coords_icrs=None,\n origin_coords_altaz=None,\n time_obs=None):\n origin_coords_icrs, origin_coords_altaz, \\\n origin_az, origin_alt, mjd = self._make_origin_coords(\n origin_coords_icrs=origin_coords_icrs,\n origin_coords_altaz=origin_coords_altaz,\n time_obs=time_obs,\n ensure_altaz=True,\n ensure_icrs=True,\n return_params=True,\n )\n if np.isscalar(mjd):\n n_models = 1\n else:\n n_models = len(mjd)\n super().__init__(\n origin_az=origin_az, origin_alt=origin_alt, mjd=mjd,\n n_models=n_models)\n self._origin_coords_icrs = origin_coords_icrs\n self._origin_coords_altaz = origin_coords_altaz\n # this is to be overridden by the __call__ so that we can\n # ensure the evaluation is always done with __call__\n self._eval_context = None\n\n def __setattr__(self, attr, value):\n # since we cache the origin coords and we need to disallow\n # changing the params to make all of the values in-sync.\n if attr in ('origin_az', 'origin_alt', 'mjd'):\n raise AttributeError(f'{attr} is read-only')\n return super().__setattr__(attr, value)\n\n @classmethod\n def _make_origin_coords(\n cls,\n origin_coords_icrs, origin_coords_altaz, time_obs,\n ensure_altaz=True,\n ensure_icrs=True,\n return_params=True,\n ):\n if sum([origin_coords_altaz is None, origin_coords_icrs is None]) == 2:\n raise ValueError(\n \"at least one of origin_coords_{altaz,icrs} is needed.\")\n if origin_coords_altaz is None and (ensure_altaz or return_params):\n # compute origin altaz from icrs and time_obs\n if time_obs is None:\n raise ValueError(\"time is need to transform to altaz.\")\n with timeit(\"transform origin from icrs to altaz\"):\n origin_coords_altaz = origin_coords_icrs.transform_to(\n cls.observer.altaz(time=time_obs))\n if origin_coords_icrs 
is None and ensure_icrs:\n # compute origin icrs from altaz\n with timeit(\"transform origin from altaz to icrs\"):\n origin_coords_icrs = origin_coords_altaz.transform_to(\"icrs\")\n if return_params:\n origin_az = origin_coords_altaz.az\n origin_alt = origin_coords_altaz.alt\n mjd = (origin_coords_altaz.frame.obstime.mjd) << u.day\n return (\n origin_coords_icrs, origin_coords_altaz,\n origin_az, origin_alt, mjd)\n return (origin_coords_icrs, origin_coords_altaz)\n\n input_frame = ToltecArrayProjModel.output_frame\n output_frame = cf.CompositeFrame([\n cf.Frame2D(\n name='sky',\n axes_names=(\"lon\", \"lat\"),\n unit=(u.deg, u.deg)),\n cf.CoordinateFrame(\n naxes=1,\n axes_type='SPATIAL',\n axes_order=(2, ),\n unit=(u.deg, ),\n axes_names=(\"pa\", ),\n name='det_pa'),\n ], name='sky')\n\n n_inputs = input_frame.naxes\n n_outputs = output_frame.naxes\n\n origin_az = Parameter(\n default=180.,\n unit=output_frame.unit[0],\n description='The Az of the telescope bore sight.'\n )\n origin_alt = Parameter(\n default=60.,\n unit=output_frame.unit[1],\n description='The Alt of the telescope bore sight.'\n )\n mjd = Parameter(\n default=Time(2022.0, format='jyear').mjd,\n unit=u.day,\n description='The UT of observation expressed in MJD.'\n )\n\n observer = toltec_info['site']['observer']\n \"\"\"The observer (LMT).\"\"\"\n\n @classmethod\n def _get_altaz_frame(cls, mjd):\n return cls.observer.altaz(time=Time(mjd, format='mjd'))\n\n @classmethod\n def _get_origin_coords_altaz(cls, origin_az, origin_alt, mjd):\n \"\"\"Return the origin coordinates in AltAz.\"\"\"\n return SkyCoord(\n origin_az,\n origin_alt,\n frame=cls._get_altaz_frame(mjd)\n )\n\n @classmethod\n @timeit\n def _get_altaz_offset_frame(cls, origin_coords_altaz):\n \"\"\"Return the sky offset frame in AltAz centered at origin.\"\"\"\n return _get_skyoffset_frame(origin_coords_altaz)\n\n @classmethod\n @timeit\n def evaluate_altaz(\n cls, x, y, pa,\n origin_coords_icrs=None,\n origin_coords_altaz=None,\n time_obs=None):\n \"\"\"Compute the projected coordinates in AltAz using full\n transformation.\n \"\"\"\n _, origin_coords_altaz = cls._make_origin_coords(\n origin_coords_icrs=origin_coords_icrs,\n origin_coords_altaz=origin_coords_altaz,\n time_obs=time_obs,\n ensure_altaz=True,\n ensure_icrs=False,\n return_params=False,\n )\n # now we always have origin_coords_altaz\n with timeit(\"apply rotation to detector offset coords\"):\n origin_alt = origin_coords_altaz.alt\n # The first step has to be rotation the toltec frame by\n # the amount of origin_coords_altaz.alt, due to the M3 mirror.\n mat_rot_m3 = rotation_matrix_2d(origin_alt.to_value(u.rad))\n\n # there should be more clever way of this but for now\n # we just spell out the rotation because x and y are already\n # separated arrays\n x_offset_altaz = mat_rot_m3[0, 0] * x + mat_rot_m3[0, 1] * y\n y_offset_altaz = mat_rot_m3[1, 0] * x + mat_rot_m3[1, 1] * y\n # y_offset_altaz = mat_rot_m3[1, 0][:, np.newaxis] \\\n # * x[np.newaxis, :] \\\n # + mat_rot_m3[1, 1][:, np.newaxis] * y[np.newaxis, :]\n # the pa get rotated by the value of alt\n pa_altaz = (pa + origin_alt).to(u.deg)\n\n # now do the coordinate transformation\n with timeit(\"transform detector offset coords to altaz\"):\n altaz_offset_frame = cls._get_altaz_offset_frame(\n origin_coords_altaz)\n det_coords_altaz_offset = SkyCoord(\n x_offset_altaz, y_offset_altaz, frame=altaz_offset_frame)\n det_coords_altaz = det_coords_altaz_offset.transform_to(\n origin_coords_altaz.frame)\n return det_coords_altaz.az, 
det_coords_altaz.alt, pa_altaz\n\n @classmethod\n @timeit\n def evaluate_icrs_fast(\n cls, x, y, pa,\n origin_coords_icrs=None,\n origin_coords_altaz=None,\n time_obs=None):\n \"\"\"Compute the projected coordinates in ICRS with small field\n approximation (TolTEC FOV is small ~4 arcmin) directly.\n \"\"\"\n origin_coords_icrs, origin_coords_altaz = cls._make_origin_coords(\n origin_coords_icrs=origin_coords_icrs,\n origin_coords_altaz=origin_coords_altaz,\n time_obs=time_obs,\n ensure_altaz=True,\n ensure_icrs=True,\n return_params=False,\n )\n with timeit(\"compute rotation angle from toltec frame to icrs\"):\n origin_par_angle = cls.observer.parallactic_angle(\n origin_coords_altaz.obstime,\n origin_coords_icrs)\n # now we can rotate the x y and pa by alt + par_ang\n rot = origin_coords_altaz.alt + origin_par_angle\n\n with timeit(\"apply rotation to detector offset coords\"):\n # The first step has to be rotation the toltec frame by\n # the amount of origin_alt, due to the M3 mirror.\n mat_rot_m3 = rotation_matrix_2d(rot.to_value(u.rad))\n\n # there should be more clever way of this but for now\n # we just spell out the rotation because x and y are already\n # separated arrays\n x_offset_icrs = mat_rot_m3[0, 0][:, np.newaxis] \\\n * x[np.newaxis, :] \\\n + mat_rot_m3[0, 1][:, np.newaxis] * y[np.newaxis, :]\n y_offset_icrs = mat_rot_m3[1, 0][:, np.newaxis] \\\n * x[np.newaxis, :] \\\n + mat_rot_m3[1, 1][:, np.newaxis] * y[np.newaxis, :]\n # the pa get rotated by the value of rot\n pa_icrs = pa + rot\n\n with timeit(\"transform detector offset coords to icrs\"):\n # now we need to build the icrs offset frame and transform back to\n # absolute coordinates\n icrs_offset_frame = _get_skyoffset_frame(origin_coords_icrs)\n\n det_coords_icrs_offset = SkyCoord(\n x_offset_icrs, y_offset_icrs, frame=icrs_offset_frame)\n det_coords_icrs = det_coords_icrs_offset.transform_to(\n origin_coords_icrs.frame)\n return det_coords_icrs.ra, det_coords_icrs.dec, pa_icrs\n\n @staticmethod\n def _check_frame_by_name(frame, frame_name):\n if isinstance(frame, str):\n return frame == frame_name\n return frame.name == frame_name\n\n @timeit\n def evaluate(\n self,\n x, y, pa, origin_az, origin_alt, mjd):\n # make sure we have _eval_context set before proceed\n eval_ctx = self._eval_context\n if eval_ctx is None:\n raise ValueError(\"This model can only be evaluated with __call__\")\n evaluate_frame = eval_ctx['evaluate_frame']\n\n # create origin coords in altaz\n origin_coords_altaz = self._get_origin_coords_altaz(\n origin_az=origin_az, origin_alt=origin_alt,\n mjd=mjd)\n\n result_altaz = self.evaluate_altaz(\n x, y, pa, origin_coords_altaz=origin_coords_altaz)\n\n # update evaluate_context\n result_az, result_alt, pa_altaz = result_altaz\n coords_altaz = SkyCoord(\n az=result_az, alt=result_alt, frame=origin_coords_altaz.frame\n )\n eval_ctx['pa_altaz'] = pa_altaz\n eval_ctx['coords_altaz'] = coords_altaz\n\n if self._check_frame_by_name(evaluate_frame, 'altaz'):\n return result_altaz\n elif self._check_frame_by_name(evaluate_frame, 'icrs'):\n # TODO the handling of other frame for the PA has to be on a\n # per-frame basis? 
So we only implement for now the ICRS\n with timeit(\"transform detector coords from altaz to icrs\"):\n coords_icrs = coords_altaz.transform_to('icrs')\n # calculate the par angle between the two set of coords\n dpa_altaz_icrs = pa_from_coords(\n observer=self.observer,\n coords_altaz=coords_altaz,\n coords_icrs=coords_icrs)\n pa_icrs = pa_altaz + dpa_altaz_icrs\n eval_ctx['pa_icrs'] = pa_icrs\n eval_ctx['coords_icrs'] = coords_icrs\n eval_ctx['dpa_altaz_icrs'] = dpa_altaz_icrs\n return coords_icrs.ra, coords_icrs.dec, pa_icrs\n else:\n raise ValueError(f\"invalid evaluate_frame {evaluate_frame}\")\n\n @timeit('toltec_sky_proj_evaluate')\n def __call__(\n self, *args,\n evaluate_frame='icrs',\n use_evaluate_icrs_fast=False,\n return_eval_context=False):\n\n result_eval_context = dict(\n evaluate_frame=evaluate_frame,\n )\n\n @contextmanager\n def _set_eval_context():\n nonlocal result_eval_context\n self._eval_context = result_eval_context\n yield\n self._eval_context = None\n\n def wrap_return(result):\n nonlocal result_eval_context\n if return_eval_context:\n return result, result_eval_context\n return result\n\n with _set_eval_context():\n if self._check_frame_by_name(evaluate_frame, 'icrs') and \\\n use_evaluate_icrs_fast:\n # use the fast icrs eval\n return wrap_return(self.evaluate_icrs_fast(\n *args,\n origin_coords_altaz=self._origin_coords_altaz,\n origin_coords_icrs=self._origin_coords_icrs,\n ))\n return wrap_return(super().__call__(*args))\n\n # TODO this is to override the default behavior of checking the model\n # axis. We allow the model axis to broadcasted with size=1.\n def _validate_input_shape(\n self, _input, idx, argnames, model_set_axis, check_model_set_axis):\n \"\"\"\n Perform basic validation of a single model input's shape\n -- it has the minimum dimensions for the given model_set_axis\n\n Returns the shape of the input if validation succeeds.\n \"\"\"\n input_shape = np.shape(_input)\n # Ensure that the input's model_set_axis matches the model's\n # n_models\n if input_shape and check_model_set_axis:\n # Note: Scalar inputs *only* get a pass on this\n if len(input_shape) < model_set_axis + 1:\n raise ValueError(\n f\"For model_set_axis={model_set_axis},\"\n f\" all inputs must be at \"\n f\"least {model_set_axis + 1}-dimensional.\")\n if input_shape[model_set_axis] > 1 and (\n input_shape[model_set_axis] != self._n_models):\n try:\n argname = argnames[idx]\n except IndexError:\n # the case of model.inputs = ()\n argname = str(idx)\n\n raise ValueError(\n f\"Input argument '{argname}' does not have the correct \"\n f\"dimensions in model_set_axis={model_set_axis} for a \"\n f\"model set with \"\n f\"n_models={self._n_models}.\")\n return input_shape\n\n\nclass KidsReadoutNoiseModel(ComplexModel):\n \"\"\"\n A model of the TolTEC KIDs readout noise.\n\n \"\"\"\n logger = get_logger()\n\n n_inputs = 1\n n_outputs = 1\n\n def __init__(self, scale_factor=1.0, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._inputs = ('S21', )\n self._outputs = ('dS21', )\n self._scale_factor = scale_factor\n\n def evaluate(self, S21):\n n = self._scale_factor\n shape = S21.shape\n dI = np.random.normal(0, n, shape)\n dQ = np.random.normal(0, n, shape)\n return dI + 1.j * dQ\n\n def evaluate_tod(self, apt, S21):\n \"\"\"Make readout noise in ADU.\"\"\"\n\n dS21 = self(S21)\n dS21 = dS21 * apt['sigma_readout'][:, np.newaxis]\n return dS21\n\n\ndef _get_default_passbands():\n \"\"\"Return the default TolTEC passband tables as a dict.\n \"\"\"\n from ...cal.toltec import 
ToltecPassband\n calobj = ToltecPassband.from_indexfile(get_pkg_data_path().joinpath(\n 'cal/toltec_passband/index.yaml'\n ))\n result = dict()\n for array_name in calobj.array_names:\n result[array_name] = calobj.get(array_name=array_name)\n return result\n\n\nclass ToltecArrayPowerLoadingModel(Model):\n \"\"\"\n A model of the LMT optical loading at the TolTEC arrays.\n\n This is based on the Mapping-speed-calculator\n \"\"\"\n\n # TODO allow overwriting these per instance.\n _toltec_passbands = _get_default_passbands()\n _cosmo = default_cosmology.get()\n\n logger = get_logger()\n\n n_inputs = 1\n n_outputs = 2\n\n @property\n def input_units(self):\n return {self.inputs[0]: u.deg}\n\n def __init__(self, array_name, atm_model_name='am_q50', *args, **kwargs):\n super().__init__(name=f'{array_name}_loading', *args, **kwargs)\n self._inputs = ('alt', )\n self._outputs = ('P', 'nep')\n self._array_name = array_name\n self._array_info = toltec_info[array_name]\n self._passband = self._toltec_passbands[array_name]\n self._f = self._passband['f'].quantity\n # check the f step, they shall be uniform\n df = np.diff(self._f).value\n if np.std(df) / df[0] > 1e-7:\n raise ValueError(\n \"invalid passband format, frequency grid has to be uniform\")\n self._df = self._f[1] - self._f[0]\n self._throughput = self._passband['throughput']\n if atm_model_name is not None:\n self._atm_model, self._atm_tx_model = get_lmt_atm_models(\n name=atm_model_name)\n else:\n self._atm_model = None\n # we still need the atm transmission for calculating efficiency\n # TODO revisit this\n _, self._atm_tx_model = get_lmt_atm_models(\n name='am_q50')\n\n @property\n def has_atm_model(self):\n return self._atm_model is not None\n\n @classproperty\n def _internal_params(cls):\n \"\"\"Lower level instrument parameters for LMT/TolTEC.\n\n Note that all these values does not take into account the\n passbands, and are frequency independent.\n \"\"\"\n # TODO merge this to the instrument fact yaml file?\n p = {\n 'det_optical_efficiency': 0.8,\n 'det_noise_factor': 0.334,\n 'horn_aperture_efficiency': 0.35,\n 'tel_diameter': 48. << u.m,\n 'tel_surface_rms': 76. << u.um,\n 'tel_emissivity': 0.06,\n 'T_coldbox': 5.75 << u.K,\n 'T_tel': 273. << u.K, # telescope ambient temperature\n 'T_coupling_optics': 290. << u.K, # coupling optics\n }\n # derived values\n p['tel_area'] = np.pi * (p['tel_diameter'] / 2.) ** 2\n # effective optics temperature due to telescope and the coupling\n p['T_warm'] = (\n p['tel_emissivity'] * p['T_tel']\n # TODO add documents for the numbers here\n + 3. * p['T_coupling_optics'] * 0.01\n )\n # cold efficiency is the efficiency inside the cold box.\n p['cold_efficiency'] = (\n p['det_optical_efficiency'] * p['horn_aperture_efficiency'])\n # effetive temperature at detectors for warm components through\n # the cold box\n p['T_det_warm'] = (p['T_warm'] * p['cold_efficiency'])\n # effetive temperature at detectors for cold box\n # note that the \"horn aperture efficiency\" is actually the\n # internal system aperture efficiency since it includes the\n # truncation of the lyot stop and the loss to the cold optics\n p['T_det_coldbox'] = (\n p['T_coldbox'] * p['det_optical_efficiency']\n * (1. 
- p['horn_aperture_efficiency'])\n )\n return p\n\n @property\n def _tel_primary_surface_optical_efficiency(self):\n \"\"\"The telescope optical efficiency due to RMS of the\n primary surface over the passband.\n\n This is just the Ruze formula.\n \"\"\"\n tel_surface_rms = self._internal_params['tel_surface_rms']\n f = self._f\n return np.exp(-((4.0 * np.pi * tel_surface_rms)/(const.c / f)) ** 2)\n\n @property\n def _system_efficiency(self):\n \"\"\"The overall system efficiency over the passband.\"\"\"\n return (\n self._tel_primary_surface_optical_efficiency\n * self._internal_params['cold_efficiency']\n * self._throughput\n )\n\n @staticmethod\n def _wsum(q, w):\n \"\"\"Return weighted sum of some quantity.\n\n q : `astropy.units.Quantity`\n The quantity.\n\n w : float\n The wegith.\n \"\"\"\n if w.ndim > 1:\n raise ValueError(\"weight has to be 1d\")\n return np.nansum(q * w, axis=-1) / np.nansum(w)\n\n def _get_T_atm(\n self, alt,\n return_avg=False):\n \"\"\"Return the atmosphere temperature.\n\n This is the \"true\" temperature without taking into account the system\n efficiency.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the weighted sum over the passband instead.\n \"\"\"\n atm_model = self._atm_model\n if atm_model is None:\n return np.squeeze(np.zeros((alt.size, self._f.size)) << u.K)\n # here we put the alt on the first axis for easier reduction on f.\n T_atm = atm_model(*np.meshgrid(self._f, alt, indexing='ij')).T\n if return_avg:\n T_atm = self._wsum(T_atm, self._throughput)\n T_atm = np.squeeze(T_atm)\n return T_atm\n\n def _get_tx_atm(self, alt):\n \"\"\"Return the atmosphere transmission.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n \"\"\"\n atm_tx_model = self._atm_tx_model\n # here we put the alt on the first axis for easier reduction on f.\n tx_atm = atm_tx_model(*np.meshgrid(self._f, alt, indexing='ij')).T\n tx_atm = np.squeeze(tx_atm)\n return tx_atm\n\n def _get_T(\n self, alt,\n return_avg=False\n ):\n \"\"\"Return the effective temperature at altitude `alt`, as seen\n by the cryostat.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the weighted sum over the passband instead.\n \"\"\"\n T_atm = self._get_T_atm(alt, return_avg=False)\n # add the telescope warm component temps\n T_tot = T_atm + self._internal_params['T_warm']\n if return_avg:\n T_tot = self._wsum(T_tot, self._system_efficiency)\n return T_tot\n\n def _get_T_det(\n self, alt,\n return_avg=True):\n \"\"\"Return the effective temperature seen by the detectors\n at altitude `alt`.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the weighted sum over the passband instead.\n \"\"\"\n T_atm = self._get_T_atm(alt, return_avg=False)\n # TODO why no telescope efficiency term?\n T_det = (\n T_atm * self._internal_params['cold_efficiency']\n + self._internal_params['T_det_warm']\n + self._internal_params['T_det_coldbox']\n ) * self._throughput\n if return_avg:\n # note this is different from the Detector.py in that\n # does not mistakenly (?) 
average over the passband again\n T_det = np.mean(T_det)\n return T_det\n\n def _T_to_dP(self, T):\n \"\"\"Return the Rayleigh-Jeans power for the passband frequency bins.\n\n Parameters\n ----------\n T : `astropy.units.Quantity`\n The temperature.\n \"\"\"\n # power from RJ source in frequency bin df\n # TODO this can be done this way because we ensured df is contant\n # over the passband.\n # we may change this to trapz to allow arbitrary grid?\n return const.k_B * T * self._df\n\n def _T_to_dnep(self, T):\n \"\"\"Return the photon noise equivalent power in W / sqrt(Hz) for\n the passband frequency bins.\n \"\"\"\n f = self._f\n df = self._df\n dP = self._T_to_dP(T)\n\n shot = 2. * const.k_B * T * const.h * f * df\n wave = 2. * dP ** 2 / df\n return np.sqrt(shot + wave)\n\n def _T_to_dnet_cmb(self, T, tx_atm):\n \"\"\"Return the noise equivalent CMB temperature in K / sqrt(Hz) for\n the passband frequency bins.\n\n Parameters\n ----------\n T : `astropy.units.Quantity`\n The temperature.\n tx_atm : array\n The atmosphere transmission.\n \"\"\"\n f = self._f\n df = self._df\n Tcmb = self._cosmo.Tcmb(0)\n\n dnep = self._T_to_dnep(T)\n x = const.h * f / (const.k_B * Tcmb)\n net_integrand = (\n (const.k_B * x) ** 2.\n * (1. / const.k_B)\n * np.exp(x) / (np.expm1(x)) ** 2.\n )\n dnet = dnep / (\n np.sqrt(2.0)\n * self._system_efficiency\n * net_integrand\n * df)\n # scale by the atmosphere transmission so this is comparable\n # to astronomical sources.\n return dnet / tx_atm\n\n def _dnep_to_dnefd(self, dnep, tx_atm):\n \"\"\"Return the noise equivalent flux density in Jy / sqrt(Hz) for\n the passband frequency bins.\n\n Parameters\n ----------\n T : `astropy.units.Quantity`\n The temperature.\n tx_atm : array\n The atmosphere transmission.\n \"\"\"\n df = self._df\n A = self._internal_params['tel_area']\n # TODO Z. Ma: I combined the sqrt(2) term. need to check the eqn here.\n dnefd = (\n dnep\n / (A * df)\n / self._system_efficiency\n * np.sqrt(2.))\n # scale by the atmosphere transmission so this is comparable\n # to astronomical sources.\n return dnefd / tx_atm # Jy / sqrt(Hz)\n\n def _get_P(self, alt):\n \"\"\"Return the detector power loading at altitude `alt`.\n\n \"\"\"\n T_det = self._get_T_det(alt=alt, return_avg=False)\n return np.nansum(self._T_to_dP(T_det), axis=-1).to(u.pW)\n\n def _get_dP(self, alt, f_smp):\n \"\"\"Return the detector power loading uncertainty according to the nep\n \"\"\"\n return (\n self._get_noise(alt)['nep']\n * np.sqrt(f_smp / 2.)).to(u.pW)\n\n def _get_noise(self, alt, return_avg=True):\n \"\"\"Return the noise at altitude `alt`.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the value integrated for the passband.\n \"\"\"\n # noise calculations\n # strategy is to do this for each frequency bin and then do a\n # weighted average across the band. This is copied directly from\n # Sean's python code.\n T_det = self._get_T_det(alt=alt, return_avg=False)\n dnep_phot = self._T_to_dnep(T_det)\n\n # detector noise factor coefficient\n det_noise_coeff = np.sqrt(\n 1. 
+ self._internal_params['det_noise_factor'])\n\n dnep = dnep_phot * det_noise_coeff\n\n # atm transmission\n tx_atm = self._get_tx_atm(alt)\n # the equivalent noise in astronomical units\n dnet_cmb = (\n self._T_to_dnet_cmb(T_det, tx_atm=tx_atm)\n * det_noise_coeff\n )\n dnefd = self._dnep_to_dnefd(dnep, tx_atm=tx_atm)\n\n if return_avg:\n # integrate these up\n net_cmb = np.sqrt(1.0 / np.nansum(dnet_cmb ** (-2.0), axis=-1))\n nefd = np.sqrt(1.0 / np.nansum(dnefd ** (-2.0), axis=-1))\n # nep is sum of squares\n nep = np.sqrt(np.nansum(dnep ** 2.0, axis=-1))\n # power just adds\n return {\n 'net_cmb': net_cmb.to(u.mK * u.Hz ** -0.5),\n 'nefd': nefd.to(u.mJy * u.Hz ** -0.5),\n 'nep': nep.to(u.aW * u.Hz ** -0.5)\n }\n return {\n 'dnet_cmb': net_cmb.to(u.mK * u.Hz ** -0.5),\n 'dnefd': nefd.to(u.mJy * u.Hz ** -0.5),\n 'dnep': nep.to(u.aW * u.Hz ** -0.5)\n }\n\n def make_summary_table(self, alt=None):\n \"\"\"Return a summary for a list of altitudes.\n\n \"\"\"\n if alt is None:\n alt = [50., 60., 70.] << u.deg\n result = dict()\n result['P'] = self._get_P(alt)\n result.update(self._get_noise(alt, return_avg=True))\n return Table(result)\n\n def evaluate(self, alt):\n P = self._get_P(alt)\n nep = self._get_noise(alt, return_avg=True)['nep']\n return P, nep\n\n def sky_sb_to_pwr(self, det_s):\n \"\"\"Return detector power loading for given on-sky surface brightness.\n \"\"\"\n # note that this is approximate using a square passband.\n wl_center = self._array_info['wl_center']\n pb_width = self._array_info['passband']\n tb = det_s.to(\n u.K,\n equivalencies=u.brightness_temperature(\n wl_center))\n p = (\n tb.to(\n u.J,\n equivalencies=u.temperature_energy())\n * pb_width\n ).to(u.pW)\n # the sys eff is also approximate\n sys_eff = self._wsum(\n self._system_efficiency, self._throughput\n )\n return p * sys_eff\n\n @contextmanager\n def eval_interp_context(self, alt_grid):\n interp_kwargs = dict(kind='linear')\n with timeit(\n f\"setup power loading model for {self._array_name} \"\n f\"eval interp context with \"\n f\"alt_grid=[{alt_grid.min()}:{alt_grid.max()}] \"\n f\"size={len(alt_grid)}\"):\n self._p_pW_interp = interp1d(\n alt_grid.to_value(u.deg),\n self._get_P(alt_grid).to_value(u.pW),\n **interp_kwargs\n )\n one_Hz = 1 << u.Hz\n self._dp_pW_interp_unity_f_smp = interp1d(\n alt_grid.to_value(u.deg),\n self._get_dP(alt_grid, one_Hz).to_value(u.pW),\n **interp_kwargs\n )\n yield self\n self._p_pW_interp = None\n self._dp_pW_interp_unity_f_smp = None\n\n def evaluate_tod(\n self,\n det_alt,\n f_smp=1 << u.Hz,\n random_seed=None,\n return_realized_noise=True,\n ):\n \"\"\"Return the array power loading along with the noise.\"\"\"\n\n if self._p_pW_interp is None:\n # no interp, direct eval\n alt = np.ravel(det_alt)\n det_pwr = self._get_P(alt).to(u.pW).reshape(det_alt.shape),\n det_delta_pwr = self._get_dP(alt, f_smp).reshape(\n det_alt.shape).to(u.pW),\n else:\n det_pwr = self._p_pW_interp(det_alt.degree) << u.pW\n one_Hz = 1. 
<< u.Hz\n det_delta_pwr = (self._dp_pW_interp_unity_f_smp(\n det_alt.degree) << u.pW) * np.sqrt(f_smp / one_Hz)\n if not return_realized_noise:\n return det_pwr, det_delta_pwr\n # realize noise\n rng = np.random.default_rng(seed=random_seed)\n det_noise = rng.normal(0., det_delta_pwr.to_value(u.pW)) << u.pW\n # calc the median P and dP for logging purpose\n med_alt = np.median(det_alt)\n med_P = self._get_P(med_alt).to(u.pW)\n med_dP = self._get_dP(med_alt, f_smp).to(u.aW)\n self.logger.debug(\n f\"array power loading at med_alt={med_alt} P={med_P} dP={med_dP}\")\n return det_pwr, det_noise\n\n\nclass ToltecPowerLoadingModel(PowerLoadingModel):\n \"\"\"\n A wrapper model to calculate power loading for all the TolTEC arrays.\n\n This model in-corporates both the \"static\" am_qxx models and the toast\n model.\n \"\"\"\n\n logger = get_logger()\n array_names = toltec_info['array_names']\n\n n_inputs = 3\n n_outputs = 1\n\n def __init__(\n self, atm_model_name, atm_model_params=None,\n atm_cache_dir=None\n ):\n if atm_model_name is None or atm_model_name == 'toast':\n # this will disable the atm component in the power loading model\n # but still create one for system efficiency calculation\n _atm_model_name = None\n else:\n _atm_model_name = atm_model_name\n self._array_power_loading_models = {\n array_name: ToltecArrayPowerLoadingModel(\n array_name=array_name,\n atm_model_name=_atm_model_name)\n for array_name in self.array_names\n }\n if atm_model_name == 'toast':\n self._toast_atm_evaluator = ToastAtmEvaluator(\n cache_dir=atm_cache_dir,\n params=atm_model_params)\n else:\n self._toast_atm_evaluator = None\n super().__init__(name='toltec_power_loading')\n self.inputs = ('array_name', 'S', 'alt')\n self.outputs = ('P', )\n self._atm_model_name = atm_model_name\n\n @property\n def atm_model_name(self):\n return self._atm_model_name\n\n def evaluate(self):\n # TODO\n # implement the default behavior for the model\n return NotImplemented\n\n def aplm_eval_interp_context(\n self, t0, t_grid,\n sky_bbox_altaz, alt_grid):\n \"\"\"Context manager that pre-calculate the interp for array power\n loading model.\n \"\"\"\n es = ExitStack()\n for m in self._array_power_loading_models.values():\n es.enter_context(m.eval_interp_context(alt_grid))\n # setup the toast eval context\n if self._toast_atm_evaluator is not None:\n es.enter_context(self._toast_atm_evaluator.setup(\n t0=t0,\n t_grid=t_grid,\n sky_bbox_altaz=sky_bbox_altaz,\n alt_grid=alt_grid,\n ))\n return es\n\n def get_P(self, det_array_name, det_az, det_alt):\n \"\"\"Evaluate the power loading model only and without noise.\"\"\"\n p_out = np.zeros(det_alt.shape) << u.pW\n for array_name in self.array_names:\n mask = (det_array_name == array_name)\n aplm = self._array_power_loading_models[array_name]\n if self.atm_model_name == 'toast':\n p = self._toast_atm_evaluator.calc_toast_atm_pwr_for_array(\n array_name=array_name,\n det_az=det_az[mask],\n det_alt=det_alt[mask])\n else:\n # use the ToltecArrayPowerLoadingModel\n p, _ = aplm.evaluate_tod(\n det_alt[mask], return_realized_noise=False)\n p_out[mask] = p\n return p_out\n\n def sky_sb_to_pwr(self, det_array_name, det_s):\n p_out = np.zeros(det_s.shape) << u.pW\n for array_name in self.array_names:\n mask = (det_array_name == array_name)\n aplm = self._array_power_loading_models[array_name]\n # compute the power loading from on-sky surface brightness\n p_out[mask] = aplm.sky_sb_to_pwr(det_s=det_s[mask])\n return p_out\n\n def evaluate_tod(\n self, det_array_name, det_s, det_az, det_alt,\n 
f_smp,\n noise_seed=None,\n ):\n p_out = self.sky_sb_to_pwr(det_array_name, det_s)\n for array_name in self.array_names:\n mask = (det_array_name == array_name)\n aplm = self._array_power_loading_models[array_name]\n if self.atm_model_name is None:\n # atm is disabled\n pass\n elif self.atm_model_name == 'toast':\n p = self._toast_atm_evaluator.calc_toast_atm_pwr_for_array(\n array_name=array_name,\n det_az=det_az[mask],\n det_alt=det_alt[mask])\n p_out[mask] += p\n else:\n # use the ToltecArrayPowerLoadingModel atm\n p, p_noise = aplm.evaluate_tod(\n det_alt=det_alt[mask],\n f_smp=f_smp,\n random_seed=noise_seed,\n return_realized_noise=True,\n )\n p_out[mask] += (p + p_noise)\n return p_out\n\n def __str__(self):\n return (\n f'{self.__class__.__name__}(atm_model_name={self.atm_model_name})')\n\n\n@add_schema\n@dataclass\nclass ToastAtmConfig(object):\n \"\"\"The config class for TOAST atm model.\"\"\"\n lmin_center: u.Quantity = field(\n default=0.01 << u.meter,\n metadata={\n 'description': 'The lmin_center value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n lmin_sigma: u.Quantity = field(\n default=0.001 << u.meter,\n metadata={\n 'description': 'The lmin_sigma value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n lmax_center: u.Quantity = field(\n default=10.0 << u.meter,\n metadata={\n 'description': 'The lmax_center value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n lmax_sigma: u.Quantity = field(\n default=10.0 << u.meter,\n metadata={\n 'description': 'The lmax_sigma value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n z0_center: u.Quantity = field(\n default=2000.0 << u.meter,\n metadata={\n 'description': 'The z0_center value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n z0_sigma: u.Quantity = field(\n default=0.0 << u.meter,\n metadata={\n 'description': 'The z0_sigma value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n zatm: u.Quantity = field(\n default=40000.0 << u.meter,\n metadata={\n 'description': 'The zatm value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n zmax: u.Quantity = field(\n default=2000.0 << u.meter,\n metadata={\n 'description': 'The zmax value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n\n class Meta:\n schema = {\n 'ignore_extra_keys': False,\n 'description': 'The parameters related to TOAST atm model.'\n }\n\n\nclass ToastAtmEvaluator(object):\n \"\"\"A helper class to work with the Toast Atm model class.\"\"\"\n\n def __init__(self, cache_dir=None, params=None):\n self._cache_dir = cache_dir\n if params is None:\n params = ToastAtmConfig()\n self._params = params\n self._toast_atm_simu = None\n\n @contextmanager\n def setup(self, t0, t_grid, sky_bbox_altaz, alt_grid):\n \"\"\"A context for TOAST atm calculation.\"\"\"\n # initialize the toast atm model\n # create the ToastAtmosphereSimulation instance here with\n # self._params and the sky bbox, and compute the atm slabs\n from . 
import toast_atm\n\n init_kwargs = {\n 't0': t0,\n 'tmin': t0.unix,\n 'tmax': (t0 + t_grid[-1]).unix,\n 'azmin': sky_bbox_altaz.w,\n 'azmax': sky_bbox_altaz.e,\n 'elmin': sky_bbox_altaz.s,\n 'elmax': sky_bbox_altaz.n,\n 'cachedir': self._cache_dir\n }\n self.logger.debug(\n f\"init toast atm simulation with:\\n{pformat_yaml(init_kwargs)}\"\n )\n toast_atm_simu = self._toast_atm_simu = \\\n toast_atm.ToastAtmosphereSimulation(**init_kwargs)\n # here we can pass the atm params to toast for generating the slabs\n setup_params = self._params\n\n self.logger.debug(\n f\"setup toast atm simulation slabs with params:\\n\"\n f\"{pformat_yaml(setup_params)}\")\n toast_atm_simu.generate_simulation(**self._params.to_dict())\n yield\n # clean up the context\n self._toast_atm_simu = None\n\n def calc_toast_atm_pwr_for_array(self, array_name, det_az, det_alt):\n toast_atm_simu = self._toast_atm_simu\n if toast_atm_simu is None:\n raise RuntimeError(\n \"The toast atm simulator is not setup.\")\n # TODO\n # implement this to do integral for each det position\n # at each time for a single array given by array_name\n raise NotImplementedError(\"toast atm is not implemented yet\")\n" ]
[ [ "numpy.ones", "numpy.sum", "numpy.diff", "numpy.nansum", "numpy.isscalar", "numpy.meshgrid", "numpy.vstack", "numpy.cos", "numpy.expm1", "numpy.mean", "numpy.zeros", "numpy.random.normal", "numpy.median", "numpy.tan", "numpy.std", "numpy.array", "numpy.random.default_rng", "numpy.squeeze", "numpy.empty", "numpy.exp", "numpy.ravel", "numpy.shape", "numpy.sqrt", "numpy.sin" ] ]
kagemusha/streamingbandit
[ "f5228611a0ac9432e958761dd6d68d972d7d163b" ]
[ "app/libs/thompson_bayesian_linear.py" ]
[ "# -*- coding: utf-8 -*-\nimport numpy as np\nfrom libs.base import *\nimport ast\n\nglobal numpy\n\nclass ThompsonBayesianLinear():\n \"\"\" Class for Thompson sampling for Bayesian Linear Regression\n \n :var dict default: The value of the model, consisting of a 1*p \\\n list of J, p*p list of P and an error rate.\n \"\"\"\n def __init__(self, default):\n if default == {}:\n self.value = {'J' : [0, 0], 'P' : [[1, 0],[0, 1]], 'err' : 1}\n else:\n self.value = default.copy()\n if isinstance(self.value['J'], str) == True:\n self.value['J'] = ast.literal_eval(self.value['J'])\n if isinstance(self.value['P'], str) == True:\n self.value['P'] = ast.literal_eval(self.value['P'])\n if isinstance(self.value['err'], str) == True:\n self.value['err'] = ast.literal_eval(self.value['err'])\n self.value['J'] = np.matrix(self.value['J'])\n self.value['P'] = np.matrix(self.value['P'])\n\n def get_dict(self):\n \"\"\" Return all the variables that are needed to do an online estimation \\\n in a dictionary. Or to save the parameters in a database.\n \"\"\"\n to_dict = self.value.copy()\n to_dict['J'] = to_dict['J'].tolist()\n to_dict['P'] = to_dict['P'].tolist()\n return to_dict\n\n def update(self, y, x, discount = 1):\n \"\"\" Update the Bayesian linear model.\n \n :param int y: The observation value.\n :param list x: A list of ints of the regressors.\n :param int discount: A discount. Default is 1, which means no discount is used.\n \"\"\"\n y = y\n x = np.matrix(x)\n self.value['J'] = (discount*(x*y)/self.value['err']) + self.value['J']\n self.value['P'] = (discount*(x.T*x)/self.value['err']) + self.value['P']\n \n def sample(self):\n \"\"\" Return a sample of coefficients Betas using Thompson sampling.\n\n \"\"\"\n # Transform J = Sigma^-1 * mu to mu\n # Transform P = Sigma^-1 to Sigma\n sigma = np.linalg.inv(self.value['P'])\n mu = sigma * self.value['J'].T\n mu = np.squeeze(np.asarray(mu))\n # Random draw from np.random.multivariate_normal\n betas = np.random.multivariate_normal(mu,sigma)\n # Prediction is y_t ~ N(betas.T * x, sigma^2)\n #y = np.random.normal(np.dot(betas.T, x), err)\n return betas\n" ]
[ [ "numpy.linalg.inv", "numpy.matrix", "numpy.random.multivariate_normal", "numpy.asarray" ] ]
sdss/chernosim
[ "734f349c82d85dd6a1fe00013d21025ef0e9b845" ]
[ "chernosim/acquisition/acquisition.py" ]
[ "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: José Sánchez-Gallego ([email protected])\n# @Date: 2020-09-13\n# @Filename: acquisition.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\nimport collections\nimport functools\nimport multiprocessing\nimport pathlib\nimport shutil\nimport warnings\n\nimport astropy.table\nimport astropy.wcs\nimport matplotlib.patches\nimport matplotlib.pyplot\nimport numpy\nimport pandas\nimport tqdm\nimport yaml\n\nfrom cherno.astrometry import AstrometryNet\nfrom sdsstools import read_yaml_file\n\nfrom .. import config\nfrom .utils import (create_gfa_wcs, get_gfa_centre, get_uniform_ra_dec,\n get_wcs_rotation, query_field, sky_separation)\n\n\ndef select_stars(data, boresight, observatory='apo',\n r1=None, r2=None, phi=None, gfa_rot=None):\n \"\"\"Selects stars for the simulation.\n\n Given a dataframe with a list of stars, returns a subset of the dataframe\n with stars that fall within the footprint of the GFA chips.\n\n The GFAs are defined as the areas that subtend an angle ``phi`` with\n respect to the boresight in an annulus of radii ``r1`` and ``r2``. The\n rotation angle of each camera is one of the ``gfa_rot`` values, with zero\n degrees corresponding to the direction of the celestial North. This is an\n approximation of the real footprint of the GFA, which are rectangular and\n not an annulus sector, but the areas are comparable and this provides a\n simple way to select the stars.\n\n Parameters\n ----------\n data : pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``ra`` and ``dec``, in degrees.\n boresight : tuple\n A tuple with the right ascension and declination of the boresight,\n in degrees.\n observatory : str\n The observatory, used to load the default configuration for the GFAs.\n r1,r2 : float\n The internal and external radii along which the GFAs are located, in\n degrees.\n phi : float\n The angle subtended by each GFA, in degrees.\n gfa_rot : list\n A list with the rotation of each GFA, with respect to the boresight,\n in degrees.\n\n Returns\n -------\n `~pandas.DataFrame`\n The input dataframe restricted to the stars that fall within the\n footprint of each GFA. 
A new column ``gfa`` is added with the index\n of the GFA, which correspond to the ``gfa_rot`` rotation.\n\n \"\"\"\n\n # Get data from configuration file if not provided.\n obs_data = config[observatory]\n r1 = r1 or obs_data['r1']\n r2 = r2 or obs_data['r2']\n phi = phi or obs_data['phi']\n gfa_rot = gfa_rot or obs_data['gfa_rot']\n\n b_ra = boresight[0]\n b_dec = boresight[1]\n\n ra_rad = numpy.radians(data.ra)\n dec_rad = numpy.radians(data.dec)\n delta_ra_rad = ra_rad - numpy.radians(b_ra)\n\n # Calculate the separation between each star and the boresight.\n sep = numpy.degrees(\n numpy.arccos(\n numpy.sin(dec_rad) * numpy.sin(numpy.radians(b_dec)) +\n numpy.cos(dec_rad) * numpy.cos(numpy.radians(b_dec)) *\n numpy.cos(delta_ra_rad)\n )\n )\n\n # Remove stars that ar not in the GFA annulus\n data = data.loc[(sep > r1) & (sep < r2)]\n\n if len(data) == 0:\n data.loc[:, ['gfa', 'theta']] = numpy.nan\n return data\n\n sep = sep[(sep > r1) & (sep < r2)]\n sep_rad = numpy.radians(sep)\n\n ra_rad = numpy.radians(data.ra)\n dec_rad = numpy.radians(data.dec)\n delta_ra_rad = ra_rad - numpy.radians(b_ra)\n\n # Calculate the angle, theta, between boresight, North, and the star.\n # We define a spherical triangle with vertices in North, boresight, and\n # each star and use the sine law.\n sin_theta = numpy.sin(delta_ra_rad) * numpy.cos(dec_rad) / numpy.sin(sep_rad)\n theta = numpy.degrees(numpy.arcsin(sin_theta))\n\n # Solve for degeneracy in arcsin.\n theta.loc[data.dec < b_dec] = 180 - theta[data.dec < b_dec]\n theta.loc[theta < 0] += 360\n\n data['theta'] = theta\n\n # Determine the GFA on which footprint each star falls, if any.\n data['gfa'] = -1\n\n for gfa_id in range(len(gfa_rot)):\n rot = gfa_rot[gfa_id]\n rot_min = (rot - phi / 2.) % 360.\n rot_max = (rot + phi / 2.) % 360.\n data.loc[(theta - rot_min) % 360. <=\n (rot_max - rot_min) % 360., 'gfa'] = gfa_id\n\n data = data.loc[data.gfa >= 0]\n\n return data\n\n\ndef radec_to_xy(data, wcs=None, **kwargs):\n \"\"\"Converts ``(RA, Dec)`` to ``(x, y)`` for a given GFA.\n\n Creates a mock WCS transformation for a given GFA and converts star RA, Dec\n to x, y on what would be a GFA image. This conversion is not carefully\n done and is not a proper transformation between on-sky coordinates and\n focal coordinates, but should be sufficient for the purposes of the\n simulation.\n\n Parameters\n ----------\n data : pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``ra`` and ``dec``, in degrees.\n wcs : ~astropy.wcs.WCS\n The WCS object to use. 
If `None`, it calls `.create_gfa_wcs`.\n kwargs : dict\n Arguments to pass to `.create_gfa_wcs`.\n\n Returns\n -------\n `~pandas.DataFrame`, `~astropy.wcs.WCS`\n The input dataframe with two columns, ``x`` and ``y`` indicating the\n position of the star on the GFA chip, and the `~astropy.wcs.WCS`\n object.\n\n \"\"\"\n\n if len(data) == 0:\n data['x'] = numpy.nan\n data['y'] = numpy.nan\n return data\n\n if not wcs:\n wcs = create_gfa_wcs(**kwargs)\n\n # Convert coordinates to x, y\n coords = data[['ra', 'dec']].to_numpy()\n\n x, y = wcs.wcs_world2pix(coords, 0).T\n data['x'] = x\n data['y'] = y\n\n return data, wcs\n\n\ndef prepare_data(boresight, data=None, observatory='apo', r1=None, r2=None,\n mag_range=None, mag_column=None, phi=None, gfa_rot=None,\n shape=None, pixel_size=None, plate_scale=None, plot=False,\n apply_proper_motion=False, ref_epoch=2015.5, epoch=False,\n database_params=None):\n \"\"\"Prepares data to be matched by astrometry.net.\n\n Performs the following steps:\n\n - Queries the database to receive the list of observed stars.\n\n - Applies proper motions.\n\n - Select stars that fall within the footprint of the GFAs, for a\n given footprint.\n\n - Calculates the WCS of each GFA and convert the input coordinates to\n pixel coordinates on the GFA image.\n\n - Creates a global WCS for the full FOV of the telescope, with zero on\n the boresight, and converts the input coordinates to pseudo-pixels in\n that frame.\n\n Parameters\n ----------\n boresight : tuple\n A tuple with the right ascension and declination of the boresight,\n in degrees.\n data : pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``ra`` and ``dec``, in degrees. If `None`, calls `.query_field` to\n retrieve a list of stars from the database.\n observatory : str\n The observatory, used to load the default configuration for the GFAs.\n r1,r2 : float\n The internal and external radii along which the GFAs are located, in\n degrees.\n mag_range : tuple\n The range of magnitudes used to select stars.\n mag_column : str\n The name of the magnitude column to query.\n phi : float\n The angle subtended by each GFA, in degrees.\n gfa_rot : list\n A list with the rotation of each GFA, with respect to the boresight,\n in degrees.\n shape : tuple\n Number of pixels, in the x and y direction of the GFA chip.\n pixel_size : float\n The pixel size, in microns.\n plate_scale : float\n The plate scale, in mm/deg.\n plot : bool or str\n Whether to produce a plot with the input stars, GFA centres, and\n footprints. If a string, the path where to save the plot.\n apply_proper_motion : bool\n Whether to propagate the position to a given ``epoch``. 
Assumes the\n data returned by `.query_field` has columns ``pmra`` and ``pmdec`` in\n mas and that ``pmra`` contains a factor with the cosine of declination.\n ref_epoch : float\n The epoch of the catalogue, as a Julian year.\n epoch : float\n The epoch of the observation, as a Julian year.\n database_params : dict\n A dictionary of database parameters to be passed to `.query_field`.\n\n Returns\n -------\n `~pandas.DataFrame`\n The input dataframe restricted to the stars that fall within the\n footprint of each GFA and with additional column indicating the GFA\n chip and x and y positions on that chip, and the global x and y\n pixel coordinates on the pseudo-frame of the FOV.\n\n \"\"\"\n\n b_ra, b_dec = boresight\n\n if data is None:\n data = query_field(boresight, r1=r1, r2=r2, observatory=observatory,\n mag_range=mag_range, mag_column=mag_column,\n database_params=database_params)\n\n data = select_stars(data, boresight, observatory=observatory,\n r1=r1, r2=r2, phi=phi, gfa_rot=gfa_rot)\n\n if apply_proper_motion:\n assert epoch is not None, 'epoch is needed to apply proper motions.'\n data['ra_orig'] = data.ra\n data['dec_orig'] = data.dec\n pmra = data.pmra / 1000 / 3600. / numpy.cos(numpy.radians(data.dec))\n pmdec = data.pmdec / 1000 / 3600.\n data.ra += pmra * (epoch - ref_epoch)\n data.dec += pmdec * (epoch - ref_epoch)\n # Deal with NaN in pmra/pmdec\n data.ra = data.ra.fillna(data.ra_orig)\n data.dec = data.dec.fillna(data.dec_orig)\n\n obs_data = config[observatory]\n gfa_rot = gfa_rot or obs_data['gfa_rot']\n plate_scale = plate_scale or obs_data['plate_scale']\n pixel_size = pixel_size or config['gfa']['pixel_size']\n\n wcs = [create_gfa_wcs(rot,\n boresight,\n observatory='apo',\n r1=r1, r2=r2,\n shape=shape,\n pixel_size=pixel_size,\n plate_scale=plate_scale)\n for rot in gfa_rot]\n\n data = data.groupby('gfa').apply(\n lambda data_gfa: radec_to_xy(data_gfa,\n wcs=wcs[data_gfa.gfa.iloc[0]])[0])\n\n if plot is not False and plot is not None:\n\n fig, ax = matplotlib.pyplot.subplots()\n\n centres = numpy.array([get_gfa_centre(rot,\n boresight,\n observatory=observatory,\n r1=r1, r2=r2)\n for rot in gfa_rot])\n\n ax.scatter(data.ra, data.dec, s=1.0, marker='.', color='b')\n ax.scatter(centres[:, 0], centres[:, 1], s=5.0, marker='x', color='r')\n\n obs_data = config[observatory]\n shape = shape or config['gfa']['shape']\n\n for ww in wcs:\n footprint = ww.calc_footprint(axes=shape)\n rect = matplotlib.patches.Polygon(footprint, facecolor='None',\n edgecolor='k', linewidth=1)\n ax.add_patch(rect)\n\n ax.set_xlim(b_ra + 1.6 / numpy.cos(numpy.radians(b_dec)),\n b_ra - 1.6 / numpy.cos(numpy.radians(b_dec)))\n ax.set_ylim(b_dec - 1.6, b_dec + 1.6)\n\n n_stars = data.groupby('gfa').size().tolist()\n ax.set_title(f'(alpha, delta)=({b_ra:.2f}, {b_dec:.2f})\\n '\n f'n_stars={sum(n_stars)} '\n f'({\", \".join(map(str, n_stars))})')\n\n ax.set_xlabel('Right Ascension [deg]')\n ax.set_ylabel('Declination [deg]')\n\n fig.savefig(plot or 'gfa.pdf')\n\n return data\n\n\ndef add_noise(data, fwhm, detection_rate=0.95, non_detection_factor=1,\n mag_thres=13, mag_column='phot_g_mean_mag'):\n r\"\"\"Adds centroiding noise to the catalogue data.\n\n Modifies the pixel coordinates in the ``data`` dataframe, adding Gaussian\n noise with :math:`\\sigma={\\rm FWHM}/2\\sqrt{2\\ln 2}` to simulate seeing.\n If ``detection_rate`` is less than 1, targets are marked as detected or\n non-detected based on the ``dection_rate`` logarithmically scaled with\n magnitude.\n\n Parameters\n ----------\n data : 
pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``x`` and ``y``, with the pixel coordinates.\n fwhm : float\n The FWHM of the Gaussian noise to add. It must be in pixel units.\n detection_rate : float\n The probability of a source to be detected and its centroid measured.\n non_detection_factor : float\n A proportional factor used to weight the detection rate so that\n :math:`d=d_0-a\\log(m-m_0)` where :math:`d` is the final detection rate\n that will be applied to a target, :math:`d_0` is initial\n ``detection_rate`, :math:`m` is the magnitude of the source,\n :math:`m_0` is ``mag_thres``, and :math:`a` is\n ``non_detection_factor``.\n mag_thres : float\n The magnitude above which the detection rate will be reduced\n logarithmically.\n mag_column : str\n The name of the magnitude column in the dataframe.\n\n Returns\n -------\n `~pandas.DataFrame`\n The input dataframe in which the pixel coordinates have been modified\n to add centroiding noise. An additional boolean column, ``detected``,\n is added to indicate whether the source has been detected following the\n logic described for ``detection_rate``.\n\n \"\"\"\n\n data['x_no_noise'] = data.loc[:, 'x']\n data['y_no_noise'] = data.loc[:, 'y']\n\n sigma = fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))\n\n n = data.shape[0]\n data.x += numpy.random.normal(0, sigma, n)\n data.y += numpy.random.normal(0, sigma, n)\n\n data['detected'] = True\n\n if detection_rate >= 1.0 or not mag_column:\n return data\n\n if not mag_thres:\n mag_thres = data[mag_column].max()\n\n delta_mag = data[mag_column] - mag_thres\n delta_mag[delta_mag < 0] = 0.\n\n detection_rate = numpy.tile(detection_rate, len(data))\n detection_rate[delta_mag > 0] -= (numpy.log10(delta_mag[delta_mag > 0]) *\n non_detection_factor)\n\n non_detected = numpy.random.uniform(size=n) > detection_rate\n data.loc[:, 'detected'] = ~non_detected\n\n return data\n\n\ndef _do_one_field(fields, config_data, observatory, output_dir,\n n_attempts, field_id, data=None, overwrite=False):\n \"\"\"Simulates one field.\"\"\"\n\n boresight = fields[field_id]\n\n plate_scale = config_data[observatory]['plate_scale']\n pixel_size = config_data['gfa']['pixel_size']\n pixel_scale = pixel_size / 1000. / plate_scale * 3600. 
# In arcsec\n\n field_dir = (output_dir / f'{field_id:05d}').absolute()\n if field_dir.exists():\n if overwrite:\n shutil.rmtree(field_dir)\n else:\n raise RuntimeError(f'{field_dir!s} already exists.')\n field_dir.mkdir(parents=True, exist_ok=True)\n\n numpy.random.seed(config_data['seed'] + field_id)\n\n astrometry_cfg = config_data['astrometry.cfg']\n if not astrometry_cfg:\n astrometry_cfg = (pathlib.Path(__file__).parent.absolute() /\n 'etc/astrometry.cfg')\n\n if not data or field_id not in data:\n star_data = prepare_data(boresight,\n observatory=observatory,\n mag_range=config_data['mag_range'],\n mag_column=config_data['mag_column'],\n apply_proper_motion=True,\n epoch=config_data['epoch'],\n plot=False,\n shape=config_data['gfa']['shape'],\n pixel_size=config_data['gfa']['pixel_size'],\n database_params=config_data['database'],\n **config_data[observatory])\n else:\n star_data = pandas.read_hdf(data[field_id])\n\n mag_column = config_data['mag_column']\n star_data.sort_values(mag_column, inplace=True)\n\n star_data.to_hdf(field_dir / f'data_{field_id:05d}.h5', 'data')\n\n if 'limit_mag_range' in config_data and config_data['limit_mag_range']:\n limit_mag_range = config_data['limit_mag_range']\n star_data = star_data[(star_data[mag_column] >= limit_mag_range[0]) &\n (star_data[mag_column] <= limit_mag_range[1])]\n\n gfa_rot = config_data[observatory]['gfa_rot']\n gfa_centres = {gfa_id: get_gfa_centre(gfa_rot[gfa_id],\n boresight,\n observatory=observatory).tolist()\n for gfa_id in range(len(gfa_rot))}\n\n for nn in range(n_attempts):\n\n n_att = nn + 1\n prefix = f'_{field_id:05d}_{n_att:03d}'\n\n log_config = {}\n log_config['input'] = {}\n log_input = log_config['input']\n\n log_input['boresight'] = boresight\n log_input['observatory'] = observatory\n log_input['field_id'] = field_id\n log_input['attempt_id'] = n_att\n log_input['gfa_centres'] = gfa_centres\n\n att_dir = field_dir / f'{n_att:03d}'\n if att_dir.exists():\n shutil.rmtree(att_dir)\n att_dir.mkdir(parents=True, exist_ok=True)\n\n fwhm = numpy.random.uniform(*config_data['fwhm_range'])\n log_input['fwhm'] = fwhm\n\n att_data = star_data.copy()\n att_data = add_noise(\n att_data, fwhm / pixel_scale,\n detection_rate=config_data['detection_rate'],\n non_detection_factor=config_data['non_detection_factor'],\n mag_thres=config_data['mag_thres'],\n mag_column=config_data['mag_column'])\n\n log_input['n_stars'] = len(att_data)\n log_input['n_detected'] = len(att_data[att_data.detected])\n\n gfa_ids = range(config_data['gfa']['n_cameras'])\n log_input['n_stars_per_gfa'] = {i: 0 for i in gfa_ids}\n log_input['n_detected_per_gfa'] = {i: 0 for i in gfa_ids}\n\n att_data.to_hdf(att_dir / f'data{prefix}.in.h5', 'data')\n\n gfa_xyls = {}\n\n for gfa_id in att_data.gfa.unique():\n\n gfa_table = astropy.table.Table.from_pandas(\n att_data.loc[(att_data.gfa == gfa_id) & att_data.detected])\n\n n_stars_gfa = len(att_data.loc[(att_data.gfa == gfa_id)])\n n_detected = len(gfa_table)\n\n gfa_table.write(att_dir / f'gfa{gfa_id}{prefix}.xyls',\n format='fits', overwrite=True)\n gfa_xyls[gfa_id] = str(att_dir / f'gfa{gfa_id}{prefix}.xyls')\n\n gfa_id = int(gfa_id) # To avoid YAML serialising as numpy object\n log_input['n_stars_per_gfa'][gfa_id] = n_stars_gfa\n log_input['n_detected_per_gfa'][gfa_id] = n_detected\n\n shutil.copy(astrometry_cfg, att_dir)\n\n with open(att_dir / f'config{prefix}.yaml', 'w') as out:\n out.write(yaml.dump(log_config))\n\n log_config['output'] = {}\n log_output = log_config['output']\n\n astrometry_net = 
AstrometryNet()\n astrometry_net.configure(\n backend_config=att_dir / pathlib.Path(astrometry_cfg).name,\n width=config_data['gfa']['shape'][0],\n height=config_data['gfa']['shape'][1],\n no_plots=True,\n scale_low=pixel_scale * 0.9,\n scale_high=pixel_scale * 1.1,\n scale_units='arcsecperpix',\n radius=config_data['search_params']['radius'],\n dir=att_dir)\n\n if config_data['search_params']['centre_on_gfa'] is False:\n\n ra_error = 2 * (numpy.random.uniform() - 0.5)\n ra_error *= config_data['search_params']['ra_error']\n dec_error = 2 * (numpy.random.uniform() - 0.5)\n dec_error *= config_data['search_params']['dec_error']\n\n prc = astrometry_net.run(list(gfa_xyls.values()),\n stdout=att_dir / f'stdout{prefix}',\n stderr=att_dir / f'stderr{prefix}',\n ra=boresight[0] + ra_error,\n dec=boresight[1] + dec_error)\n\n log_output['solve_field_time'] = prc.time\n\n else:\n\n prc_time = 0.0\n\n for gfa_id in gfa_xyls:\n\n ra_error = 2 * (numpy.random.uniform() - 0.5)\n ra_error *= config_data['search_params']['ra_error']\n dec_error = 2 * (numpy.random.uniform() - 0.5)\n dec_error *= config_data['search_params']['dec_error']\n\n gfa_centre = gfa_centres[gfa_id]\n\n stdout = att_dir / f'stdout_gfa{gfa_id}{prefix}'\n stderr = att_dir / f'stderr_gfa{gfa_id}{prefix}'\n\n prc = astrometry_net.run([gfa_xyls[gfa_id]],\n stdout=stdout,\n stderr=stderr,\n ra=gfa_centre[0] + ra_error,\n dec=gfa_centre[1] + dec_error)\n\n prc_time += prc.time\n\n log_output['solve_field_time'] = prc_time\n\n log_output['solved'] = {i: False for i in gfa_ids}\n\n att_data['ra_solved'] = numpy.nan\n att_data['dec_solved'] = numpy.nan\n att_data['separation'] = numpy.nan\n\n for gfa_id in gfa_ids:\n\n if not (att_dir / f'gfa{gfa_id}{prefix}.solved').exists():\n continue\n\n log_output['solved'][gfa_id] = True\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n solved_wcs = astropy.wcs.WCS(str(att_dir /\n f'gfa{gfa_id}{prefix}.wcs'))\n\n gfa_idx = att_data.gfa == gfa_id\n\n radec_solved = solved_wcs.wcs_pix2world(\n att_data.loc[gfa_idx, ['x', 'y']].to_numpy(), 0)\n\n att_data.loc[gfa_idx, 'ra_solved'] = radec_solved[:, 0]\n att_data.loc[gfa_idx, 'dec_solved'] = radec_solved[:, 1]\n att_data.loc[gfa_idx, 'separation'] = sky_separation(\n att_data.loc[gfa_idx, 'ra'],\n att_data.loc[gfa_idx, 'dec'],\n att_data.loc[gfa_idx, 'ra_solved'],\n att_data.loc[gfa_idx, 'dec_solved'],\n )\n\n with open(att_dir / f'config{prefix}.yaml', 'w') as out:\n out.write(yaml.dump(log_config))\n\n att_data.to_hdf(att_dir / f'data{prefix}.out.h5', 'data')\n\n\nclass Simulation:\n \"\"\"Runs a simulation using multiprocessing.\n\n Parameters\n ----------\n fields : int or list\n Number of uniformly distributed fields to test or a list of field\n centres.\n output_dir : str\n The root of the directory structure where all the output files will\n be stored.\n observatory : str\n The observatory, either ``'apo'`` or ``'lco'``.\n config_file : str\n The path to the configuration file for the simulation.\n n_attempts : int\n Number of attempts, with randomised noise, to try per field.\n\n \"\"\"\n\n def __init__(self, fields, output_dir, observatory=None,\n config_file=None, n_attempts=10):\n\n self.output_dir = pathlib.Path(output_dir)\n\n if config_file:\n config_data = read_yaml_file(config_file)\n else:\n config_data = config\n\n self.config_data = config_data.copy()\n\n self.observatory = observatory or self.config_data['observatory']\n self.n_attempts = n_attempts\n\n numpy.random.seed(config_data['seed'])\n\n if 
isinstance(fields, int):\n fields = get_uniform_ra_dec(fields).tolist()\n self.fields = {fid + 1: list(map(float, fields[fid]))\n for fid in range(len(fields))}\n elif isinstance(fields, dict):\n self.fields = fields\n elif isinstance(fields, (tuple, list)):\n self.fields = {fid + 1: list(map(float, fields[fid]))\n for fid in range(len(fields))}\n\n self._data = None\n\n @classmethod\n def from_simulation(cls, path, *args, fields=None, **kwargs):\n \"\"\"Loads fields and data from a different simulation.\"\"\"\n\n path = pathlib.Path(path)\n config_path = path / 'config.yaml'\n\n if fields is None:\n config = yaml.safe_load(open(config_path))\n fields = config['fields']\n\n data = {field_id: (path / f'{field_id:05d}' / f'data_{field_id:05d}.h5')\n for field_id in fields}\n\n obj = cls(fields, *args, **kwargs)\n obj._data = data\n\n return obj\n\n def run(self, n_cpus=None, overwrite=False):\n \"\"\"Run the simulation.\n\n Parameters\n ----------\n n_cpus : int\n Number of CPUs to use. If not defined, uses all the CPUs.\n\n \"\"\"\n\n self.config_data['fields'] = self.fields\n self.config_data['n_attempts'] = self.n_attempts\n self.config_data['observatory'] = self.observatory\n\n n_cpus = n_cpus or multiprocessing.cpu_count()\n\n self.output_dir.mkdir(parents=True, exist_ok=True)\n\n config_path = self.output_dir / 'config.yaml'\n if config_path.exists() and not overwrite:\n raise RuntimeError(f'{config_path!s} already exists.')\n\n with open(config_path, 'w') as out:\n out.write(yaml.dump(self.config_data))\n\n f = functools.partial(_do_one_field, self.fields,\n self.config_data, self.observatory,\n self.output_dir, self.n_attempts,\n data=self._data, overwrite=overwrite)\n\n with tqdm.tqdm(total=len(self.fields)) as pbar:\n with multiprocessing.Pool(processes=n_cpus) as pool:\n for __ in pool.imap(f, self.fields.keys()):\n pbar.update()\n\n\ndef collate_results(path, show_progress=False):\n \"\"\"Collates the results of a simulation.\n\n Parameters\n ----------\n path : str\n The path to a completed simulation.\n show_progress : bool\n Whether to show a progress bar.\n\n Returns\n -------\n dict\n A dictionary with the collated results of the simulation.\n\n \"\"\"\n\n Row = collections.namedtuple('Row', ('observatory', 'field',\n 'attempt', 'gfa',\n 'field_ra', 'field_dec',\n 'fwhm', 'n_stars', 'n_detected',\n 'min_mag', 'max_mag',\n 'solved', 'solve_time_avg',\n 'rot', 'true_rot', 'rmse'))\n\n path = pathlib.Path(path)\n\n config = yaml.safe_load(open(path / 'config.yaml'))\n\n obs = config['observatory']\n gfa_rot = config[obs]['gfa_rot']\n\n fields = config['fields']\n n_attempts = config.get('n_attempts') or config.get('n_attamps')\n mag_column = config['mag_column']\n\n rows = [None] * len(fields) * n_attempts * len(gfa_rot)\n\n if show_progress:\n pbar = tqdm.tqdm(total=len(fields))\n else:\n pbar = None\n\n n = 0\n\n for field_id in fields:\n field_str = f'{field_id:05d}'\n\n for att in range(1, n_attempts + 1):\n\n att_str = f'{att:03d}'\n prefix = f'{field_str}_{att_str}'\n\n att_path = path / field_str / att_str\n att_config = yaml.safe_load(open(att_path / f'config_{prefix}.yaml'))\n\n data_out = pandas.read_hdf(att_path / f'data_{prefix}.out.h5')\n solve_time_avg = (att_config['output']['solve_field_time'] /\n len(att_config['input']['gfa_centres']))\n\n for gfa in att_config['input']['gfa_centres']:\n\n gfa_data = data_out.loc[data_out.gfa == gfa]\n\n solved = att_config['output']['solved'][gfa]\n\n if solved:\n\n with warnings.catch_warnings():\n 
warnings.simplefilter('ignore')\n wcs = astropy.wcs.WCS(str(att_path /\n f'gfa{gfa}_{prefix}.wcs'))\n rot = get_wcs_rotation(wcs)\n rot = -rot % 360\n\n sep = gfa_data.separation\n rmse = numpy.sqrt(numpy.sum((sep * 3600.)**2) / len(sep))\n\n else:\n\n rot = None\n rmse = None\n\n row = Row(\n observatory=obs,\n field=field_id,\n attempt=att,\n gfa=gfa,\n field_ra=fields[field_id][0],\n field_dec=fields[field_id][1],\n fwhm=att_config['input']['fwhm'],\n n_stars=att_config['input']['n_stars_per_gfa'][gfa],\n n_detected=att_config['input']['n_detected_per_gfa'][gfa],\n min_mag=gfa_data[mag_column].min(),\n max_mag=gfa_data[mag_column].max(),\n solved=solved,\n solve_time_avg=solve_time_avg,\n rot=rot,\n true_rot=gfa_rot[gfa],\n rmse=rmse)\n\n rows[n] = row\n n += 1\n\n if pbar:\n pbar.update()\n\n return pandas.DataFrame(rows[0:n]).set_index(['field', 'attempt', 'gfa'])\n" ]
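A minimal standalone sketch of the centroid-noise model implemented by ``add_noise`` above; the values of ``fwhm``, ``mags``, ``d0``, ``a`` and ``m0`` are hypothetical stand-ins for the function's ``fwhm``, magnitude column, ``detection_rate``, ``non_detection_factor`` and ``mag_thres`` parameters, not code from the record itself:

import numpy

# A Gaussian's FWHM relates to its standard deviation by
# FWHM = 2 * sqrt(2 * ln 2) * sigma, so the per-axis centroid
# jitter applied to x and y is sigma = fwhm / 2.3548...
fwhm = 2.0  # pixels (hypothetical)
sigma = fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))

# Magnitude-dependent detection rate, d = d0 - a * log10(m - m0),
# applied only to sources fainter than the threshold m0.
mags = numpy.array([10.0, 14.0, 17.5])  # hypothetical magnitudes
d0, a, m0 = 0.95, 0.2, 12.0

delta = numpy.clip(mags - m0, 0.0, None)
rate = numpy.full_like(mags, d0)
rate[delta > 0] -= a * numpy.log10(delta[delta > 0])

# A source is detected when a uniform draw falls at or below its rate.
detected = numpy.random.uniform(size=mags.size) <= rate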
[ [ "numpy.random.uniform", "numpy.sum", "numpy.arcsin", "pandas.DataFrame", "numpy.random.seed", "numpy.cos", "pandas.read_hdf", "numpy.log10", "numpy.log", "numpy.random.normal", "numpy.sin", "numpy.radians" ] ]
anjlip/pymatgen
[ "5cc42912a12a265a603df7e34c856561f76edc1f" ]
[ "pymatgen/analysis/graphs.py" ]
[ "# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport warnings\nimport subprocess\nimport numpy as np\nimport os.path\nimport copy\nfrom itertools import combinations\n\nfrom pymatgen.core import Structure, Lattice, PeriodicSite, Molecule\nfrom pymatgen.core.structure import FunctionalGroups\nfrom pymatgen.util.coord import lattice_points_in_supercell\nfrom pymatgen.vis.structure_vtk import EL_COLORS\n\nfrom monty.json import MSONable\nfrom monty.os.path import which\nfrom operator import itemgetter\nfrom collections import namedtuple, defaultdict\nfrom scipy.spatial import KDTree\nfrom scipy.stats import describe\n\nimport networkx as nx\nimport networkx.algorithms.isomorphism as iso\nfrom networkx.readwrite import json_graph\nfrom networkx.drawing.nx_agraph import write_dot\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n__author__ = \"Matthew Horton, Evan Spotte-Smith\"\n__version__ = \"0.1\"\n__maintainer__ = \"Matthew Horton\"\n__email__ = \"[email protected]\"\n__status__ = \"Beta\"\n__date__ = \"August 2017\"\n\nConnectedSite = namedtuple('ConnectedSite', 'site, jimage, index, weight, dist')\n\n\nclass StructureGraph(MSONable):\n \"\"\"\n This is a class for annotating a Structure with\n bond information, stored in the form of a graph. A \"bond\" does\n not necessarily have to be a chemical bond, but can store any\n kind of information that connects two Sites.\n \"\"\"\n\n def __init__(self, structure, graph_data=None):\n \"\"\"\n If constructing this class manually, use the `with_empty_graph`\n method or `with_local_env_strategy` method (using an algorithm\n provided by the `local_env` module, such as O'Keeffe).\n\n This class that contains connection information:\n relationships between sites represented by a Graph structure,\n and an associated structure object.\n\n This class uses the NetworkX package to store and operate\n on the graph itself, but contains a lot of helper methods\n to make associating a graph with a given crystallographic\n structure easier.\n\n Use cases for this include storing bonding information,\n NMR J-couplings, Heisenberg exchange parameters, etc.\n\n For periodic graphs, class stores information on the graph\n edges of what lattice image the edge belongs to.\n\n :param structure: a Structure object\n\n :param graph_data: dict containing graph information in\n dict format (not intended to be constructed manually,\n see as_dict method for format)\n \"\"\"\n\n if isinstance(structure, StructureGraph):\n # just make a copy from input\n graph_data = structure.as_dict()['graphs']\n\n self.structure = structure\n self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)\n\n # tidy up edge attr dicts, reading to/from json duplicates\n # information\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if 'id' in d:\n del d['id']\n if 'key' in d:\n del d['key']\n # ensure images are tuples (conversion to lists happens\n # when serializing back from json), it's important images\n # are hashable/immutable\n if 'to_jimage' in d:\n d['to_jimage'] = tuple(d['to_jimage'])\n if 'from_jimage' in d:\n d['from_jimage'] = tuple(d['from_jimage'])\n\n @classmethod\n def with_empty_graph(cls, structure, name=\"bonds\",\n edge_weight_name=None,\n edge_weight_units=None):\n \"\"\"\n Constructor for StructureGraph, returns a StructureGraph\n object with an empty graph (no edges, only nodes defined\n that correspond to Sites in Structure).\n\n :param 
structure (Structure):\n :param name (str): name of graph, e.g. \"bonds\"\n :param edge_weight_name (str): name of edge weights,\n e.g. \"bond_length\" or \"exchange_constant\"\n :param edge_weight_units (str): name of edge weight units\n e.g. \"Å\" or \"eV\"\n :return (StructureGraph):\n \"\"\"\n\n if edge_weight_name and (edge_weight_units is None):\n raise ValueError(\"Please specify units associated \"\n \"with your edge weights. Can be \"\n \"empty string if arbitrary or \"\n \"dimensionless.\")\n\n # construct graph with one node per site\n # graph attributes don't change behavior of graph,\n # they're just for book-keeping\n graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,\n edge_weight_units=edge_weight_units,\n name=name)\n graph.add_nodes_from(range(len(structure)))\n\n graph_data = json_graph.adjacency_data(graph)\n\n return cls(structure, graph_data=graph_data)\n\n @staticmethod\n def with_edges(structure, edges):\n \"\"\"\n Constructor for StructureGraph, using pre-existing or pre-defined edges\n with optional edge parameters.\n\n :param structure: Structure object\n :param edges: dict representing the bonds of the structure\n (format: {(from_index, to_index, from_image, to_image): props},\n where props is a dictionary of properties, including weight.\n Props should be None if no additional properties are to be\n specified.\n :return: sg, a StructureGraph\n \"\"\"\n\n sg = StructureGraph.with_empty_graph(structure, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n for edge, props in edges.items():\n\n try:\n from_index = edge[0]\n to_index = edge[1]\n from_image = edge[2]\n to_image = edge[3]\n except TypeError:\n raise ValueError(\"Edges must be given as (from_index, to_index,\"\n \" from_image, to_image) tuples\")\n\n if props is not None:\n if \"weight\" in props.keys():\n weight = props[\"weight\"]\n del props[\"weight\"]\n else:\n weight = None\n\n if len(props.items()) == 0:\n props = None\n else:\n weight = None\n\n nodes = sg.graph.nodes\n if not (from_index in nodes and to_index in nodes):\n raise ValueError(\"Edges cannot be added if nodes are not\"\n \" present in the graph.
Please check your\"\n \" indices.\")\n\n sg.add_edge(from_index, to_index, from_jimage=from_image,\n to_jimage=to_image, weight=weight,\n edge_properties=props)\n\n sg.set_node_attributes()\n return sg\n\n @staticmethod\n def with_local_env_strategy(structure, strategy):\n \"\"\"\n Constructor for StructureGraph, using a strategy\n from :Class: `pymatgen.analysis.local_env`.\n\n :param structure: Structure object\n :param strategy: an instance of a\n :Class: `pymatgen.analysis.local_env.NearNeighbors` object\n :return:\n \"\"\"\n\n sg = StructureGraph.with_empty_graph(structure, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):\n for neighbor in neighbors:\n\n # local_env will always try to add two edges\n # for any one bond, one from site u to site v\n # and another form site v to site u: this is\n # harmless, so warn_duplicates=False\n sg.add_edge(from_index=n,\n from_jimage=(0, 0, 0),\n to_index=neighbor['site_index'],\n to_jimage=neighbor['image'],\n weight=neighbor['weight'],\n warn_duplicates=False)\n\n return sg\n\n @property\n def name(self):\n \"\"\"\n :return: Name of graph\n \"\"\"\n return self.graph.graph['name']\n\n @property\n def edge_weight_name(self):\n \"\"\"\n :return: Name of the edge weight property of graph\n \"\"\"\n return self.graph.graph['edge_weight_name']\n\n @property\n def edge_weight_unit(self):\n \"\"\"\n :return: Units of the edge weight property of graph\n \"\"\"\n return self.graph.graph['edge_weight_units']\n\n def add_edge(self, from_index, to_index,\n from_jimage=(0, 0, 0), to_jimage=None,\n weight=None, warn_duplicates=True,\n edge_properties=None):\n \"\"\"\n Add edge to graph.\n\n Since physically a 'bond' (or other connection\n between sites) doesn't have a direction, from_index,\n from_jimage can be swapped with to_index, to_jimage.\n\n However, images will always always be shifted so that\n from_index < to_index and from_jimage becomes (0, 0, 0).\n\n :param from_index: index of site connecting from\n :param to_index: index of site connecting to\n :param from_jimage (tuple of ints): lattice vector of periodic\n image, e.g. (1, 0, 0) for periodic image in +x direction\n :param to_jimage (tuple of ints): lattice vector of image\n :param weight (float): e.g. 
bond length\n :param warn_duplicates (bool): if True, will warn if\n trying to add duplicate edges (duplicate edges will not\n be added in either case)\n :param edge_properties (dict): any other information to\n store on graph edges, similar to Structure's site_properties\n :return:\n \"\"\"\n\n # this is not necessary for the class to work, but\n # just makes it neater\n if to_index < from_index:\n to_index, from_index = from_index, to_index\n to_jimage, from_jimage = from_jimage, to_jimage\n\n # constrain all from_jimages to be (0, 0, 0),\n # initial version of this class worked even if\n # from_jimage != (0, 0, 0), but making this\n # assumption simplifies logic later\n if not np.array_equal(from_jimage, (0, 0, 0)):\n shift = from_jimage\n from_jimage = np.subtract(from_jimage, shift)\n to_jimage = np.subtract(to_jimage, shift)\n\n # automatic detection of to_jimage if user doesn't specify\n # will try and detect all equivalent images and add multiple\n # edges if appropriate\n if to_jimage is None:\n # assume we want the closest site\n warnings.warn(\"Please specify to_jimage to be unambiguous, \"\n \"trying to automatically detect.\")\n dist, to_jimage = self.structure[from_index]\\\n .distance_and_image(self.structure[to_index])\n if dist == 0:\n # this will happen when from_index == to_index,\n # typically in primitive single-atom lattices\n images = [1, 0, 0], [0, 1, 0], [0, 0, 1]\n dists = []\n for image in images:\n dists.append(self.structure[from_index]\n .distance_and_image(self.structure[from_index],\n jimage=image)[0])\n dist = min(dists)\n equiv_sites = self.structure.get_neighbors_in_shell(self.structure[from_index].coords,\n dist,\n dist*0.01,\n include_index=True)\n for site, dist, to_index in equiv_sites:\n to_jimage = np.subtract(site.frac_coords, self.structure[from_index].frac_coords)\n to_jimage = to_jimage.astype(int)\n self.add_edge(from_index=from_index, from_jimage=(0, 0, 0),\n to_jimage=to_jimage, to_index=to_index)\n return\n\n # sanitize types\n from_jimage, to_jimage = tuple(map(int, from_jimage)), tuple(map(int, to_jimage))\n from_index, to_index = int(from_index), int(to_index)\n\n # check we're not trying to add a duplicate edge\n # there should only ever be at most one edge\n # between a given (site, jimage) pair and another\n # (site, jimage) pair\n existing_edge_data = self.graph.get_edge_data(from_index, to_index)\n if existing_edge_data:\n for key, d in existing_edge_data.items():\n if d[\"to_jimage\"] == to_jimage:\n if warn_duplicates:\n warnings.warn(\"Trying to add an edge that already exists from \"\n \"site {} to site {} in {}.\".format(from_index,\n to_index,\n to_jimage))\n return\n\n # generic container for additional edge properties,\n # similar to site properties\n edge_properties = edge_properties or {}\n\n if weight:\n self.graph.add_edge(from_index, to_index,\n to_jimage=to_jimage,\n weight=weight,\n **edge_properties)\n else:\n self.graph.add_edge(from_index, to_index,\n to_jimage=to_jimage,\n **edge_properties)\n\n def insert_node(self, i, species, coords, coords_are_cartesian=False,\n validate_proximity=False, site_properties=None, edges=None):\n \"\"\"\n A wrapper around Molecule.insert(), which also incorporates the new\n site into the MoleculeGraph.\n\n :param i: Index at which to insert the new site\n :param species: Species for the new site\n :param coords: 3x1 array representing coordinates of the new site\n :param coords_are_cartesian: Whether coordinates are cartesian.\n Defaults to False.\n :param validate_proximity: For 
Molecule.insert(); if True (default\n False), distance will be checked to ensure that site can be safely\n added.\n :param site_properties: Site properties for Molecule\n :param edges: List of dicts representing edges to be added to the\n MoleculeGraph. These edges must include the index of the new site i,\n and all indices used for these edges should reflect the\n MoleculeGraph AFTER the insertion, NOT before. Each dict should at\n least have a \"to_index\" and \"from_index\" key, and can also have a\n \"weight\" and a \"properties\" key.\n :return:\n \"\"\"\n\n self.structure.insert(i, species, coords,\n coords_are_cartesian=coords_are_cartesian,\n validate_proximity=validate_proximity,\n properties=site_properties)\n\n mapping = {}\n for j in range(len(self.structure) - 1):\n if j < i:\n mapping[j] = j\n else:\n mapping[j] = j + 1\n nx.relabel_nodes(self.graph, mapping, copy=False)\n\n self.graph.add_node(i)\n self.set_node_attributes()\n\n if edges is not None:\n for edge in edges:\n try:\n self.add_edge(edge[\"from_index\"], edge[\"to_index\"],\n from_jimage=(0, 0, 0),\n to_jimage=edge[\"to_jimage\"],\n weight=edge.get(\"weight\", None),\n edge_properties=edge.get(\"properties\", None))\n except KeyError:\n raise RuntimeError(\"Some edges are invalid.\")\n\n def set_node_attributes(self):\n \"\"\"\n Gives each node a \"specie\" and a \"coords\" attribute, updated with the\n current species and coordinates.\n\n :return:\n \"\"\"\n\n species = {}\n coords = {}\n properties = {}\n for node in self.graph.nodes():\n species[node] = self.structure[node].specie.symbol\n coords[node] = self.structure[node].coords\n properties[node] = self.structure[node].properties\n\n nx.set_node_attributes(self.graph, species, \"specie\")\n nx.set_node_attributes(self.graph, coords, \"coords\")\n nx.set_node_attributes(self.graph, properties, \"properties\")\n\n def alter_edge(self, from_index, to_index, to_jimage=None,\n new_weight=None, new_edge_properties=None):\n \"\"\"\n Alters either the weight or the edge_properties of\n an edge in the StructureGraph.\n\n :param from_index: int\n :param to_index: int\n :param to_jimage: tuple\n :param new_weight: alter_edge does not require\n that weight be altered. As such, by default, this\n is None. If weight is to be changed, it should be a\n float.\n :param new_edge_properties: alter_edge does not require\n that edge_properties be altered. As such, by default,\n this is None. If any edge properties are to be changed,\n it should be a dictionary of edge properties to be changed.\n :return:\n \"\"\"\n\n existing_edges = self.graph.get_edge_data(from_index, to_index)\n\n # ensure that edge exists before attempting to change it\n if not existing_edges:\n raise ValueError(\"Edge between {} and {} cannot be altered;\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n if to_jimage is None:\n edge_index = 0\n else:\n for i, properties in existing_edges.items():\n if properties[\"to_jimage\"] == to_jimage:\n edge_index = i\n\n if new_weight is not None:\n self.graph[from_index][to_index][edge_index]['weight'] = new_weight\n\n if new_edge_properties is not None:\n for prop in list(new_edge_properties.keys()):\n self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]\n\n def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):\n \"\"\"\n Remove an edge from the StructureGraph. 
If no image is given, this method will fail.\n\n :param from_index: int\n :param to_index: int\n :param to_jimage: tuple\n :param allow_reverse: If allow_reverse is True, then break_edge will\n attempt to break both (from_index, to_index) and, failing that,\n will attempt to break (to_index, from_index).\n :return:\n \"\"\"\n\n # ensure that edge exists before attempting to remove it\n existing_edges = self.graph.get_edge_data(from_index, to_index)\n existing_reverse = None\n\n if to_jimage is None:\n raise ValueError(\"Image must be supplied, to avoid ambiguity.\")\n\n if existing_edges:\n for i, properties in existing_edges.items():\n if properties[\"to_jimage\"] == to_jimage:\n edge_index = i\n\n self.graph.remove_edge(from_index, to_index, edge_index)\n\n else:\n if allow_reverse:\n existing_reverse = self.graph.get_edge_data(to_index, from_index)\n\n if existing_reverse:\n for i, properties in existing_reverse.items():\n if properties[\"to_jimage\"] == to_jimage:\n edge_index = i\n\n self.graph.remove_edge(to_index, from_index, edge_index)\n else:\n raise ValueError(\"Edge cannot be broken between {} and {};\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n def remove_nodes(self, indices):\n \"\"\"\n A wrapper for Molecule.remove_sites().\n\n :param indices: list of indices in the current Molecule (and graph) to\n be removed.\n :return:\n \"\"\"\n\n self.structure.remove_sites(indices)\n self.graph.remove_nodes_from(indices)\n\n mapping = {}\n for correct, current in enumerate(sorted(self.graph.nodes)):\n mapping[current] = correct\n\n nx.relabel_nodes(self.graph, mapping, copy=False)\n self.set_node_attributes()\n\n def substitute_group(self, index, func_grp, strategy, bond_order=1,\n graph_dict=None, strategy_params=None):\n \"\"\"\n Builds off of Structure.substitute to replace an atom in self.structure\n with a functional group. This method also amends self.graph to\n incorporate the new functional group.\n\n NOTE: Care must be taken to ensure that the functional group that is\n substituted will not place atoms to close to each other, or violate the\n dimensions of the Lattice.\n\n :param index: Index of atom to substitute.\n :param func_grp: Substituent molecule. There are two options:\n\n 1. Providing an actual Molecule as the input. The first atom\n must be a DummySpecie X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n :param strategy: Class from pymatgen.analysis.local_env.\n :param bond_order: A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. Defaults to 1.\n :param graph_dict: Dictionary representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. 
If None, then the algorithm\n will attempt to automatically determine bonds using one of\n a list of strategies defined in pymatgen.analysis.local_env.\n :param strategy_params: dictionary of keyword arguments for strategy.\n If None, default parameters will be used.\n :return:\n \"\"\"\n\n def map_indices(grp):\n grp_map = {}\n\n # Get indices now occupied by functional group\n # Subtracting 1 because the dummy atom X should not count\n atoms = len(grp) - 1\n offset = len(self.structure) - atoms\n\n for i in range(atoms):\n grp_map[i] = i + offset\n\n return grp_map\n\n if isinstance(func_grp, Molecule):\n func_grp = copy.deepcopy(func_grp)\n else:\n try:\n func_grp = copy.deepcopy(FunctionalGroups[func_grp])\n except:\n raise RuntimeError(\"Can't find functional group in list. \"\n \"Provide explicit coordinate instead\")\n\n self.structure.substitute(index, func_grp, bond_order=bond_order)\n\n mapping = map_indices(func_grp)\n\n # Remove dummy atom \"X\"\n func_grp.remove_species(\"X\")\n\n if graph_dict is not None:\n for (u, v) in graph_dict.keys():\n edge_props = graph_dict[(u, v)]\n if \"to_jimage\" in edge_props.keys():\n to_jimage = edge_props[\"to_jimage\"]\n del edge_props[\"to_jimage\"]\n else:\n # By default, assume that all edges should stay remain\n # inside the initial image\n to_jimage = (0, 0, 0)\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n self.add_edge(mapping[u], mapping[v], to_jimage=to_jimage,\n weight=weight, edge_properties=edge_props)\n\n else:\n if strategy_params is None:\n strategy_params = {}\n strat = strategy(**strategy_params)\n\n for site in mapping.values():\n neighbors = strat.get_nn_info(self.structure, site)\n\n for neighbor in neighbors:\n self.add_edge(from_index=site,\n from_jimage=(0, 0, 0),\n to_index=neighbor['site_index'],\n to_jimage=neighbor['image'],\n weight=neighbor['weight'],\n warn_duplicates=False)\n\n def get_connected_sites(self, n, jimage=(0, 0, 0)):\n \"\"\"\n Returns a named tuple of neighbors of site n:\n periodic_site, jimage, index, weight.\n Index is the index of the corresponding site\n in the original structure, weight can be\n None if not defined.\n :param n: index of Site in Structure\n :param jimage: lattice vector of site\n :return: list of ConnectedSite tuples,\n sorted by closest first\n \"\"\"\n\n connected_sites = set()\n connected_site_images = set()\n\n out_edges = [(u, v, d, 'out') for u, v, d in self.graph.out_edges(n, data=True)]\n in_edges = [(u, v, d, 'in') for u, v, d in self.graph.in_edges(n, data=True)]\n\n for u, v, d, dir in out_edges + in_edges:\n\n to_jimage = d['to_jimage']\n\n if dir == 'in':\n u, v = v, u\n to_jimage = np.multiply(-1, to_jimage)\n\n to_jimage = tuple(map(int, np.add(to_jimage, jimage)))\n site_d = self.structure[v].as_dict()\n site_d['abc'] = np.add(site_d['abc'], to_jimage).tolist()\n site = PeriodicSite.from_dict(site_d)\n\n # from_site if jimage arg != (0, 0, 0)\n relative_jimage = np.subtract(to_jimage, jimage)\n dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)\n\n weight = d.get('weight', None)\n\n if (v, to_jimage) not in connected_site_images:\n\n connected_site = ConnectedSite(site=site,\n jimage=to_jimage,\n index=v,\n weight=weight,\n dist=dist)\n\n connected_sites.add(connected_site)\n connected_site_images.add((v, to_jimage))\n\n # return list sorted by closest sites first\n connected_sites = list(connected_sites)\n connected_sites.sort(key=lambda x: x.dist)\n\n return connected_sites\n\n 
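    # Illustrative usage of ``get_connected_sites`` above (a hypothetical
    # sketch, not part of the original module), assuming a rock-salt
    # Structure ``s`` built elsewhere and a near-neighbour strategy from
    # ``pymatgen.analysis.local_env``:
    #
    #     sg = StructureGraph.with_local_env_strategy(s, MinimumDistanceNN())
    #     for cs in sg.get_connected_sites(0):
    #         print(cs.index, cs.jimage, cs.dist)  # neighbours, closest first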
def get_coordination_of_site(self, n):\n \"\"\"\n Returns the number of neighbors of site n.\n In graph terms, simply returns degree\n of node corresponding to site n.\n :param n: index of site\n :return (int):\n \"\"\"\n number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])\n return self.graph.degree(n) - number_of_self_loops\n\n def draw_graph_to_file(self, filename=\"graph\",\n diff=None,\n hide_unconnected_nodes=False,\n hide_image_edges=True,\n edge_colors=False,\n node_labels=False,\n weight_labels=False,\n image_labels=False,\n color_scheme=\"VESTA\",\n keep_dot=False,\n algo=\"fdp\"):\n \"\"\"\n Draws graph using GraphViz.\n\n The networkx graph object itself can also be drawn\n with networkx's in-built graph drawing methods, but\n note that this might give misleading results for\n multigraphs (edges are super-imposed on each other).\n\n If visualization is difficult to interpret,\n `hide_image_edges` can help, especially in larger\n graphs.\n\n :param filename: filename to output, will detect filetype\n from extension (any graphviz filetype supported, such as\n pdf or png)\n :param diff (StructureGraph): an additional graph to\n compare with, will color edges red that do not exist in diff\n and edges green that are in diff graph but not in the\n reference graph\n :param hide_unconnected_nodes: if True, hide unconnected\n nodes\n :param hide_image_edges: if True, do not draw edges that\n go through periodic boundaries\n :param edge_colors (bool): if True, use node colors to\n color edges\n :param node_labels (bool): if True, label nodes with\n species and site index\n :param weight_labels (bool): if True, label edges with\n weights\n :param image_labels (bool): if True, label edges with\n their periodic images (usually only used for debugging,\n edges to periodic images always appear as dashed lines)\n :param color_scheme (str): \"VESTA\" or \"JMOL\"\n :param keep_dot (bool): keep GraphViz .dot file for later\n visualization\n :param algo: any graphviz algo, \"neato\" (for simple graphs)\n or \"fdp\" (for more crowded graphs) usually give good outputs\n :return:\n \"\"\"\n\n if not which(algo):\n raise RuntimeError(\"StructureGraph graph drawing requires \"\n \"GraphViz binaries to be in the path.\")\n\n # Developer note: NetworkX also has methods for drawing\n # graphs using matplotlib, these also work here. 
However,\n # a dedicated tool like GraphViz allows for much easier\n # control over graph appearance and also correctly displays\n # mutli-graphs (matplotlib can superimpose multiple edges).\n\n g = self.graph.copy()\n\n g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': \"false\"}\n\n # add display options for nodes\n for n in g.nodes():\n\n # get label by species name\n label = \"{}({})\".format(str(self.structure[n].specie), n) if node_labels else \"\"\n\n # use standard color scheme for nodes\n c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])\n\n # get contrasting font color\n # magic numbers account for perceived luminescence\n # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color\n fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587\n + c[2] * 0.114) / 255 < 0.5 else '#ffffff'\n\n # convert color to hex string\n color = \"#{:02x}{:02x}{:02x}\".format(c[0], c[1], c[2])\n\n g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,\n fontname=\"Helvetica-bold\", style=\"filled\", shape=\"circle\")\n\n edges_to_delete = []\n\n # add display options for edges\n for u, v, k, d in g.edges(keys=True, data=True):\n\n # retrieve from/to images, set as origin if not defined\n to_image = d['to_jimage']\n\n # set edge style\n d['style'] = \"solid\"\n if to_image != (0, 0, 0):\n d['style'] = \"dashed\"\n if hide_image_edges:\n edges_to_delete.append((u, v, k))\n\n # don't show edge directions\n d['arrowhead'] = \"none\"\n\n # only add labels for images that are not the origin\n if image_labels:\n d['headlabel'] = \"\" if to_image == (0, 0, 0) else \"to {}\".format((to_image))\n d['arrowhead'] = \"normal\" if d['headlabel'] else \"none\"\n\n # optionally color edges using node colors\n color_u = g.node[u]['fillcolor']\n color_v = g.node[v]['fillcolor']\n d['color_uv'] = \"{};0.5:{};0.5\".format(color_u, color_v) if edge_colors else \"#000000\"\n\n # optionally add weights to graph\n if weight_labels:\n units = g.graph.get('edge_weight_units', \"\")\n if d.get('weight'):\n d['label'] = \"{:.2f} {}\".format(d['weight'], units)\n\n # update edge with our new style attributes\n g.edges[u, v, k].update(d)\n\n # optionally remove periodic image edges,\n # these can be confusing due to periodic boundaries\n if hide_image_edges:\n for edge_to_delete in edges_to_delete:\n g.remove_edge(*edge_to_delete)\n\n # optionally hide unconnected nodes,\n # these can appear when removing periodic edges\n if hide_unconnected_nodes:\n g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])\n\n # optionally highlight differences with another graph\n if diff:\n diff = self.diff(diff, strict=True)\n green_edges = []\n red_edges = []\n for u, v, k, d in g.edges(keys=True, data=True):\n if (u, v, d['to_jimage']) in diff['self']:\n # edge has been deleted\n red_edges.append((u, v, k))\n elif (u, v, d['to_jimage']) in diff['other']:\n # edge has been added\n green_edges.append((u, v, k))\n for u, v, k in green_edges:\n g.edges[u, v, k].update({'color_uv': '#00ff00'})\n for u, v, k in red_edges:\n g.edges[u, v, k].update({'color_uv': '#ff0000'})\n\n basename, extension = os.path.splitext(filename)\n extension = extension[1:]\n\n write_dot(g, basename+\".dot\")\n\n with open(filename, \"w\") as f:\n\n args = [algo, \"-T\", extension, basename+\".dot\"]\n rs = subprocess.Popen(args,\n stdout=f,\n stdin=subprocess.PIPE, close_fds=True)\n rs.communicate()\n if rs.returncode != 0:\n raise RuntimeError(\"{} exited with return code 
{}.\".format(algo, rs.returncode))\n\n if not keep_dot:\n os.remove(basename+\".dot\")\n\n @property\n def types_and_weights_of_connections(self):\n \"\"\"\n Extract a dictionary summarizing the types and weights\n of edges in the graph.\n\n :return: A dictionary with keys specifying the\n species involved in a connection in alphabetical order\n (e.g. string 'Fe-O') and values which are a list of\n weights for those connections (e.g. bond lengths).\n \"\"\"\n def get_label(u, v):\n u_label = self.structure[u].species_string\n v_label = self.structure[v].species_string\n return \"-\".join(sorted((u_label, v_label)))\n\n types = defaultdict(list)\n for u, v, d in self.graph.edges(data=True):\n label = get_label(u, v)\n types[label].append(d['weight'])\n\n return dict(types)\n\n @property\n def weight_statistics(self):\n \"\"\"\n Extract a statistical summary of edge weights present in\n the graph.\n\n :return: A dict with an 'all_weights' list, 'minimum',\n 'maximum', 'median', 'mean', 'std_dev'\n \"\"\"\n\n all_weights = [d.get('weight', None) for u, v, d\n in self.graph.edges(data=True)]\n stats = describe(all_weights, nan_policy='omit')\n\n return {\n 'all_weights': all_weights,\n 'min': stats.minmax[0],\n 'max': stats.minmax[1],\n 'mean': stats.mean,\n 'variance': stats.variance\n }\n\n def types_of_coordination_environments(self, anonymous=False):\n \"\"\"\n Extract information on the different co-ordination environments\n present in the graph.\n\n :param anonymous: if anonymous, will replace specie names\n with A, B, C, etc.\n :return: a list of co-ordination environments,\n e.g. ['Mo-S(6)', 'S-Mo(3)']\n \"\"\"\n\n motifs = set()\n for idx, site in enumerate(self.structure):\n\n centre_sp = site.species_string\n\n connected_sites = self.get_connected_sites(idx)\n connected_species = [connected_site.site.species_string\n for connected_site in connected_sites]\n\n labels = []\n for sp in set(connected_species):\n count = connected_species.count(sp)\n labels.append((count, sp))\n\n labels = sorted(labels, reverse=True)\n\n if anonymous:\n mapping = {centre_sp: 'A'}\n available_letters = [chr(66+i) for i in range(25)]\n for label in labels:\n sp = label[1]\n if sp not in mapping:\n mapping[sp] = available_letters.pop(0)\n centre_sp = 'A'\n labels = [(label[0], mapping[label[1]]) for label in labels]\n\n labels = [\"{}({})\".format(label[1], label[0]) for label in labels]\n motif = '{}-{}'.format(centre_sp, ','.join(labels))\n motifs.add(motif)\n\n return sorted(list(motifs))\n\n def as_dict(self):\n \"\"\"\n As in :Class: `pymatgen.core.Structure` except\n with using `to_dict_of_dicts` from NetworkX\n to store graph information.\n \"\"\"\n\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": self.structure.as_dict(),\n \"graphs\": json_graph.adjacency_data(self.graph)}\n\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n As in :Class: `pymatgen.core.Structure` except\n restoring graphs using `from_dict_of_dicts`\n from NetworkX to restore graph information.\n \"\"\"\n s = Structure.from_dict(d['structure'])\n return cls(s, d['graphs'])\n\n def __mul__(self, scaling_matrix):\n \"\"\"\n Replicates the graph, creating a supercell,\n intelligently joining together\n edges that lie on periodic boundaries.\n In principle, any operations on the expanded\n graph could also be done on the original\n graph, but a larger graph can be easier to\n visualize and reason about.\n :param scaling_matrix: same as Structure.__mul__\n :return:\n 
\"\"\"\n\n # Developer note: a different approach was also trialed, using\n # a simple Graph (instead of MultiDiGraph), with node indices\n # representing both site index and periodic image. Here, the\n # number of nodes != number of sites in the Structure. This\n # approach has many benefits, but made it more difficult to\n # keep the graph in sync with its corresponding Structure.\n\n # Broadly, it would be easier to multiply the Structure\n # *before* generating the StructureGraph, but this isn't\n # possible when generating the graph using critic2 from\n # charge density.\n\n # Multiplication works by looking for the expected position\n # of an image node, and seeing if that node exists in the\n # supercell. If it does, the edge is updated. This is more\n # computationally expensive than just keeping track of the\n # which new lattice images present, but should hopefully be\n # easier to extend to a general 3x3 scaling matrix.\n\n # code adapted from Structure.__mul__\n scale_matrix = np.array(scaling_matrix, np.int16)\n if scale_matrix.shape != (3, 3):\n scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)\n else:\n # TODO: test __mul__ with full 3x3 scaling matrices\n raise NotImplementedError('Not tested with 3x3 scaling matrices yet.')\n new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))\n\n f_lat = lattice_points_in_supercell(scale_matrix)\n c_lat = new_lattice.get_cartesian_coords(f_lat)\n\n new_sites = []\n new_graphs = []\n\n for v in c_lat:\n\n # create a map of nodes from original graph to its image\n mapping = {n: n + len(new_sites) for n in range(len(self.structure))}\n\n for idx, site in enumerate(self.structure):\n\n s = PeriodicSite(site.species, site.coords + v,\n new_lattice, properties=site.properties,\n coords_are_cartesian=True, to_unit_cell=False)\n\n new_sites.append(s)\n\n new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))\n\n new_structure = Structure.from_sites(new_sites)\n\n # merge all graphs into one big graph\n new_g = nx.MultiDiGraph()\n for new_graph in new_graphs:\n new_g = nx.union(new_g, new_graph)\n\n edges_to_remove = [] # tuple of (u, v, k)\n edges_to_add = [] # tuple of (u, v, attr_dict)\n\n # list of new edges inside supercell\n # for duplicate checking\n edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True)\n if d['to_jimage'] == (0, 0, 0)]\n new_periodic_images = []\n\n orig_lattice = self.structure.lattice\n\n # use k-d tree to match given position to an\n # existing Site in Structure\n kd_tree = KDTree(new_structure.cart_coords)\n\n # tolerance in Å for sites to be considered equal\n # this could probably be a lot smaller\n tol = 0.05\n\n for u, v, k, d in new_g.edges(keys=True, data=True):\n\n to_jimage = d['to_jimage'] # for node v\n\n # reduce unnecessary checking\n if to_jimage != (0, 0, 0):\n\n # get index in original site\n n_u = u % len(self.structure)\n n_v = v % len(self.structure)\n\n # get fractional co-ordinates of where atoms defined\n # by edge are expected to be, relative to original\n # lattice (keeping original lattice has\n # significant benefits)\n v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)\n u_frac = self.structure[n_u].frac_coords\n\n # using the position of node u as a reference,\n # get relative Cartesian co-ordinates of where\n # atoms defined by edge are expected to be\n v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)\n u_cart = orig_lattice.get_cartesian_coords(u_frac)\n v_rel = np.subtract(v_image_cart, u_cart)\n\n 
# now retrieve position of node v in\n # new supercell, and get absolute Cartesian\n # co-ordinates of where atoms defined by edge\n # are expected to be\n v_expec = new_structure[u].coords + v_rel\n\n # now search in new structure for these atoms\n # query returns (distance, index)\n v_present = kd_tree.query(v_expec)\n v_present = v_present[1] if v_present[0] <= tol else None\n\n # check if image sites now present in supercell\n # and if so, delete old edge that went through\n # periodic boundary\n if v_present is not None:\n\n new_u = u\n new_v = v_present\n new_d = d.copy()\n\n # node now inside supercell\n new_d['to_jimage'] = (0, 0, 0)\n\n edges_to_remove.append((u, v, k))\n\n # make sure we don't try to add duplicate edges\n # will remove two edges for every one we add\n if {new_u, new_v} not in edges_inside_supercell:\n\n # normalize direction\n if new_v < new_u:\n new_u, new_v = new_v, new_u\n\n edges_inside_supercell.append({new_u, new_v})\n edges_to_add.append((new_u, new_v, new_d))\n\n else:\n\n # want to find new_v such that we have\n # full periodic boundary conditions\n # so that nodes on one side of supercell\n # are connected to nodes on opposite side\n\n v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)\n\n # find new to_jimage\n # use np.around to fix issues with finite precision leading to incorrect image\n v_expec_image = np.around(v_expec_frac, decimals=3)\n v_expec_image = v_expec_image - v_expec_image%1\n\n v_expec_frac = np.subtract(v_expec_frac, v_expec_image)\n v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)\n v_present = kd_tree.query(v_expec)\n v_present = v_present[1] if v_present[0] <= tol else None\n\n if v_present is not None:\n\n new_u = u\n new_v = v_present\n new_d = d.copy()\n new_to_jimage = tuple(map(int, v_expec_image))\n\n # normalize direction\n if new_v < new_u:\n new_u, new_v = new_v, new_u\n new_to_jimage = tuple(np.multiply(-1, d['to_jimage']).astype(int))\n\n new_d['to_jimage'] = new_to_jimage\n\n edges_to_remove.append((u, v, k))\n\n if (new_u, new_v, new_to_jimage) not in new_periodic_images:\n edges_to_add.append((new_u, new_v, new_d))\n new_periodic_images.append((new_u, new_v, new_to_jimage))\n\n logger.debug(\"Removing {} edges, adding {} new edges.\".format(len(edges_to_remove),\n len(edges_to_add)))\n\n # add/delete marked edges\n for edge_to_remove in edges_to_remove:\n new_g.remove_edge(*edge_to_remove)\n for (u, v, d) in edges_to_add:\n new_g.add_edge(u, v, **d)\n\n # return new instance of StructureGraph with supercell\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": new_structure.as_dict(),\n \"graphs\": json_graph.adjacency_data(new_g)}\n\n sg = StructureGraph.from_dict(d)\n\n return sg\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def _edges_to_string(self, g):\n\n header = \"from to to_image \"\n header_line = \"---- ---- ------------\"\n edge_weight_name = g.graph[\"edge_weight_name\"]\n if edge_weight_name:\n print_weights = [\"weight\"]\n edge_label = g.graph[\"edge_weight_name\"]\n edge_weight_units = g.graph[\"edge_weight_units\"]\n if edge_weight_units:\n edge_label += \" ({})\".format(edge_weight_units)\n header += \" {}\".format(edge_label)\n header_line += \" {}\".format(\"-\"*max([18, len(edge_label)]))\n else:\n print_weights = False\n\n s = header + \"\\n\" + header_line + \"\\n\"\n\n edges = list(g.edges(data=True))\n\n # sort edges for consistent ordering\n edges.sort(key=itemgetter(0,1))\n\n if
print_weights:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12} {:.3e}\\n\".format(u, v, str(data.get(\"to_jimage\", (0, 0, 0))),\n data.get(\"weight\", 0))\n else:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12}\\n\".format(u, v,\n str(data.get(\"to_jimage\", (0, 0, 0))))\n\n return s\n\n def __str__(self):\n s = \"Structure Graph\"\n s += \"\\nStructure: \\n{}\".format(self.structure.__str__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __repr__(self):\n s = \"Structure Graph\"\n s += \"\\nStructure: \\n{}\".format(self.structure.__repr__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __len__(self):\n \"\"\"\n :return: length of Structure / number of nodes in graph\n \"\"\"\n return len(self.structure)\n\n def sort(self, key=None, reverse=False):\n \"\"\"\n Same as Structure.sort(), also remaps nodes in graph.\n :param key:\n :param reverse:\n :return:\n \"\"\"\n\n old_structure = self.structure.copy()\n\n # sort Structure\n self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)\n\n # apply Structure ordering to graph\n mapping = {idx:self.structure.index(site) for idx, site in enumerate(old_structure)}\n self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)\n\n # normalize directions of edges\n edges_to_remove = []\n edges_to_add = []\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if v < u:\n new_v, new_u, new_d = u, v, d.copy()\n new_d['to_jimage'] = tuple(np.multiply(-1, d['to_jimage']).astype(int))\n edges_to_remove.append((u, v, k))\n edges_to_add.append((new_u, new_v, new_d))\n\n # add/delete marked edges\n for edges_to_remove in edges_to_remove:\n self.graph.remove_edge(*edges_to_remove)\n for (u, v, d) in edges_to_add:\n self.graph.add_edge(u, v, **d)\n\n def __copy__(self):\n return StructureGraph.from_dict(self.as_dict())\n\n def __eq__(self, other):\n \"\"\"\n Two StructureGraphs are equal if they have equal Structures,\n and have the same edges between Sites. Edge weights can be\n different and StructureGraphs can still be considered equal.\n\n :param other: StructureGraph\n :return (bool):\n \"\"\"\n\n # sort for consistent node indices\n # PeriodicSite should have a proper __hash__() value,\n # using its frac_coords as a convenient key\n mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])\n\n edges = {(u, v, d['to_jimage'])\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v, d['to_jimage'])\n for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n return (edges == edges_other) and \\\n (self.structure == other_sorted.structure)\n\n def diff(self, other, strict=True):\n \"\"\"\n Compares two StructureGraphs. Returns dict with\n keys 'self', 'other', 'both' with edges that are\n present in only one StructureGraph ('self' and\n 'other'), and edges that are present in both.\n\n The Jaccard distance is a simple measure of the\n dissimilarity between two StructureGraphs (ignoring\n edge weights), and is defined by 1 - (size of the\n intersection / size of the union) of the sets of\n edges. 
This is returned with key 'dist'.\n\n Important note: all node indices are in terms\n of the StructureGraph this method is called\n from, not the 'other' StructureGraph: there\n is no guarantee the node indices will be the\n same if the underlying Structures are ordered\n differently.\n\n :param other: StructureGraph\n :param strict: if False, will compare bonds\n from different Structures, with node indices\n replaced by Specie strings, will not count\n number of occurrences of bonds\n :return:\n \"\"\"\n\n if self.structure != other.structure and strict:\n return ValueError(\"Meaningless to compare StructureGraphs if \"\n \"corresponding Structures are different.\")\n\n if strict:\n\n # sort for consistent node indices\n # PeriodicSite should have a proper __hash__() value,\n # using its frac_coords as a convenient key\n mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])\n\n edges = {(u, v, d['to_jimage'])\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v, d['to_jimage'])\n for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n else:\n\n edges = {(str(self.structure[u].specie),\n str(self.structure[v].specie))\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(str(other.structure[u].specie),\n str(other.structure[v].specie))\n for u, v, d in other.graph.edges(keys=False, data=True)}\n\n if len(edges) == 0 and len(edges_other) == 0:\n jaccard_dist = 0 # by definition\n else:\n jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))\n\n return {\n 'self': edges - edges_other,\n 'other': edges_other - edges,\n 'both': edges.intersection(edges_other),\n 'dist': jaccard_dist\n }\n\n def get_subgraphs_as_molecules(self, use_weights=False):\n \"\"\"\n Retrieve subgraphs as molecules, useful for extracting\n molecules from periodic crystals.\n\n Will only return unique molecules, not any duplicates\n present in the crystal (a duplicate defined as an\n isomorphic subgraph).\n\n :param use_weights (bool): If True, only treat subgraphs\n as isomorphic if edges have the same weights. Typically,\n this means molecules will need to have the same bond\n lengths to be defined as duplicates, otherwise bond\n lengths can differ. This is a fairly robust approach,\n but will treat e.g. 
enantiomers as being duplicates.\n\n :return: list of unique Molecules in Structure\n \"\"\"\n\n # creating a supercell is an easy way to extract\n # molecules (and not, e.g., layers of a 2D crystal)\n # without adding extra logic\n if getattr(self, '_supercell_sg', None) is None:\n self._supercell_sg = supercell_sg = self*(3,3,3)\n\n # make undirected to find connected subgraphs\n supercell_sg.graph = nx.Graph(supercell_sg.graph)\n\n # find subgraphs\n all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))\n\n # discount subgraphs that lie across *supercell* boundaries\n # these will subgraphs representing crystals\n molecule_subgraphs = []\n for subgraph in all_subgraphs:\n intersects_boundary = any([d['to_jimage'] != (0, 0, 0)\n for u, v, d in subgraph.edges(data=True)])\n if not intersects_boundary:\n molecule_subgraphs.append(subgraph)\n\n # add specie names to graph to be able to test for isomorphism\n for subgraph in molecule_subgraphs:\n for n in subgraph:\n subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))\n\n # now define how we test for isomorphism\n def node_match(n1, n2):\n return n1['specie'] == n2['specie']\n def edge_match(e1, e2):\n if use_weights:\n return e1['weight'] == e2['weight']\n else:\n return True\n\n # prune duplicate subgraphs\n unique_subgraphs = []\n for subgraph in molecule_subgraphs:\n\n already_present = [nx.is_isomorphic(subgraph, g,\n node_match=node_match,\n edge_match=edge_match)\n for g in unique_subgraphs]\n\n if not any(already_present):\n unique_subgraphs.append(subgraph)\n\n # get Molecule objects for each subgraph\n molecules = []\n for subgraph in unique_subgraphs:\n\n coords = [supercell_sg.structure[n].coords for n\n in subgraph.nodes()]\n species = [supercell_sg.structure[n].specie for n\n in subgraph.nodes()]\n\n molecule = Molecule(species, coords)\n\n # shift so origin is at center of mass\n molecule = molecule.get_centered_molecule()\n\n molecules.append(molecule)\n\n return molecules\n\n\nclass MolGraphSplitError(Exception):\n # Raised when a molecule graph is failed to split into two disconnected\n # subgraphs\n pass\n\n\nclass MoleculeGraph(MSONable):\n \"\"\"\n This is a class for annotating a Molecule with\n bond information, stored in the form of a graph. 
A \"bond\" does\n not necessarily have to be a chemical bond, but can store any\n kind of information that connects two Sites.\n \"\"\"\n\n def __init__(self, molecule, graph_data=None):\n \"\"\"\n If constructing this class manually, use the `with_empty_graph`\n method or `with_local_env_strategy` method (using an algorithm\n provided by the `local_env` module, such as O'Keeffe).\n\n This class that contains connection information:\n relationships between sites represented by a Graph structure,\n and an associated structure object.\n\n This class uses the NetworkX package to store and operate\n on the graph itself, but contains a lot of helper methods\n to make associating a graph with a given molecule easier.\n\n Use cases for this include storing bonding information,\n NMR J-couplings, Heisenberg exchange parameters, etc.\n\n :param molecule: Molecule object\n\n :param graph_data: dict containing graph information in\n dict format (not intended to be constructed manually,\n see as_dict method for format)\n \"\"\"\n\n if isinstance(molecule, MoleculeGraph):\n # just make a copy from input\n graph_data = molecule.as_dict()['graphs']\n\n self.molecule = molecule\n self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)\n\n # tidy up edge attr dicts, reading to/from json duplicates\n # information\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if 'id' in d:\n del d['id']\n if 'key' in d:\n del d['key']\n # ensure images are tuples (conversion to lists happens\n # when serializing back from json), it's important images\n # are hashable/immutable\n if 'to_jimage' in d:\n d['to_jimage'] = tuple(d['to_jimage'])\n if 'from_jimage' in d:\n d['from_jimage'] = tuple(d['from_jimage'])\n\n self.set_node_attributes()\n\n @classmethod\n def with_empty_graph(cls, molecule, name=\"bonds\",\n edge_weight_name=None,\n edge_weight_units=None):\n \"\"\"\n Constructor for MoleculeGraph, returns a MoleculeGraph\n object with an empty graph (no edges, only nodes defined\n that correspond to Sites in Molecule).\n\n :param molecule (Molecule):\n :param name (str): name of graph, e.g. \"bonds\"\n :param edge_weight_name (str): name of edge weights,\n e.g. \"bond_length\" or \"exchange_constant\"\n :param edge_weight_units (str): name of edge weight units\n e.g. \"Å\" or \"eV\"\n :return (MoleculeGraph):\n \"\"\"\n\n if edge_weight_name and (edge_weight_units is None):\n raise ValueError(\"Please specify units associated \"\n \"with your edge weights. Can be \"\n \"empty string if arbitrary or \"\n \"dimensionless.\")\n\n # construct graph with one node per site\n # graph attributes don't change behavior of graph,\n # they're just for book-keeping\n graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,\n edge_weight_units=edge_weight_units,\n name=name)\n graph.add_nodes_from(range(len(molecule)))\n\n graph_data = json_graph.adjacency_data(graph)\n\n return cls(molecule, graph_data=graph_data)\n\n @staticmethod\n def with_edges(molecule, edges):\n \"\"\"\n Constructor for MoleculeGraph, using pre-existing or pre-defined edges\n with optional edge parameters.\n\n :param molecule: Molecule object\n :param edges: dict representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. 
Props should be None if no\n additional properties are to be specified.\n :return: mg, a MoleculeGraph\n \"\"\"\n\n mg = MoleculeGraph.with_empty_graph(molecule, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n for edge, props in edges.items():\n\n try:\n from_index = edge[0]\n to_index = edge[1]\n except TypeError:\n raise ValueError(\"Edges must be given as (from_index, to_index)\"\n \"tuples\")\n\n if props is not None:\n if \"weight\" in props.keys():\n weight = props[\"weight\"]\n del props[\"weight\"]\n else:\n weight = None\n\n if len(props.items()) == 0:\n props = None\n else:\n weight = None\n\n nodes = mg.graph.nodes\n if not (from_index in nodes and to_index in nodes):\n raise ValueError(\"Edges cannot be added if nodes are not\"\n \" present in the graph. Please check your\"\n \" indices.\")\n\n mg.add_edge(from_index, to_index, weight=weight,\n edge_properties=props)\n\n mg.set_node_attributes()\n return mg\n\n @staticmethod\n def with_local_env_strategy(molecule, strategy, reorder=True,\n extend_structure=True):\n \"\"\"\n Constructor for MoleculeGraph, using a strategy\n from :Class: `pymatgen.analysis.local_env`.\n\n :param molecule: Molecule object\n :param strategy: an instance of a\n :Class: `pymatgen.analysis.local_env.NearNeighbors` object\n :param reorder: bool, representing if graph nodes need to be reordered\n following the application of the local_env strategy\n :param extend_structure: If True (default), then a large artificial box\n will be placed around the Molecule, because some strategies assume\n periodic boundary conditions.\n :return: mg, a MoleculeGraph\n \"\"\"\n\n mg = MoleculeGraph.with_empty_graph(molecule, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n # NearNeighbor classes only (generally) work with structures\n # molecules have to be boxed first\n coords = molecule.cart_coords\n\n if extend_structure:\n a = max(coords[:, 0]) - min(coords[:, 0]) + 100\n b = max(coords[:, 1]) - min(coords[:, 1]) + 100\n c = max(coords[:, 2]) - min(coords[:, 2]) + 100\n\n molecule = molecule.get_boxed_structure(a, b, c, no_cross=True)\n\n for n in range(len(molecule)):\n neighbors = strategy.get_nn_info(molecule, n)\n for neighbor in neighbors:\n\n # all bonds in molecules should not cross\n # (artificial) periodic boundaries\n if not np.array_equal(neighbor['image'], [0, 0, 0]):\n continue\n\n # local_env will always try to add two edges\n # for any one bond, one from site u to site v\n # and another form site v to site u: this is\n # harmless, so warn_duplicates=False\n mg.add_edge(from_index=n,\n to_index=neighbor['site_index'],\n weight=neighbor['weight'],\n warn_duplicates=False)\n\n if reorder:\n # Reverse order of nodes to match with molecule\n n = len(mg.molecule)\n mapping = {i: (n-i) for i in range(n)}\n mapping = {i: (j-1) for i, j in mapping.items()}\n\n mg.graph = nx.relabel_nodes(mg.graph, mapping)\n\n duplicates = []\n for edge in mg.graph.edges:\n if edge[2] != 0:\n duplicates.append(edge)\n\n for duplicate in duplicates:\n mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])\n\n mg.set_node_attributes()\n return mg\n\n @property\n def name(self):\n \"\"\"\n :return: Name of graph\n \"\"\"\n return self.graph.graph['name']\n\n @property\n def edge_weight_name(self):\n \"\"\"\n :return: Name of the edge weight property of graph\n \"\"\"\n return self.graph.graph['edge_weight_name']\n\n @property\n def edge_weight_unit(self):\n \"\"\"\n :return: Units of the edge weight property of 
graph\n \"\"\"\n return self.graph.graph['edge_weight_units']\n\n def add_edge(self, from_index, to_index,\n weight=None, warn_duplicates=True,\n edge_properties=None):\n \"\"\"\n Add edge to graph.\n\n Since physically a 'bond' (or other connection\n between sites) doesn't have a direction, from_index,\n from_jimage can be swapped with to_index, to_jimage.\n\n However, images will always always be shifted so that\n from_index < to_index and from_jimage becomes (0, 0, 0).\n\n :param from_index: index of site connecting from\n :param to_index: index of site connecting to\n :param weight (float): e.g. bond length\n :param warn_duplicates (bool): if True, will warn if\n trying to add duplicate edges (duplicate edges will not\n be added in either case)\n :param edge_properties (dict): any other information to\n store on graph edges, similar to Structure's site_properties\n :return:\n \"\"\"\n\n # this is not necessary for the class to work, but\n # just makes it neater\n if to_index < from_index:\n to_index, from_index = from_index, to_index\n\n # sanitize types\n from_index, to_index = int(from_index), int(to_index)\n\n # check we're not trying to add a duplicate edge\n # there should only ever be at most one edge\n # between two sites\n existing_edge_data = self.graph.get_edge_data(from_index, to_index)\n if existing_edge_data and warn_duplicates:\n warnings.warn(\"Trying to add an edge that already exists from \"\n \"site {} to site {}.\".format(from_index,\n to_index))\n return\n\n # generic container for additional edge properties,\n # similar to site properties\n edge_properties = edge_properties or {}\n\n if weight:\n self.graph.add_edge(from_index, to_index,\n weight=weight,\n **edge_properties)\n else:\n self.graph.add_edge(from_index, to_index,\n **edge_properties)\n\n def insert_node(self, i, species, coords, validate_proximity=False,\n site_properties=None, edges=None):\n \"\"\"\n A wrapper around Molecule.insert(), which also incorporates the new\n site into the MoleculeGraph.\n\n :param i: Index at which to insert the new site\n :param species: Species for the new site\n :param coords: 3x1 array representing coordinates of the new site\n :param validate_proximity: For Molecule.insert(); if True (default\n False), distance will be checked to ensure that site can be safely\n added.\n :param site_properties: Site properties for Molecule\n :param edges: List of dicts representing edges to be added to the\n MoleculeGraph. These edges must include the index of the new site i,\n and all indices used for these edges should reflect the\n MoleculeGraph AFTER the insertion, NOT before. Each dict should at\n least have a \"to_index\" and \"from_index\" key, and can also have a\n \"weight\" and a \"properties\" key.\n :return:\n \"\"\"\n\n self.molecule.insert(i, species, coords,\n validate_proximity=validate_proximity,\n properties=site_properties)\n\n mapping = {}\n for j in range(len(self.molecule) - 1):\n if j < i:\n mapping[j] = j\n else:\n mapping[j] = j + 1\n nx.relabel_nodes(self.graph, mapping, copy=False)\n\n self.graph.add_node(i)\n self.set_node_attributes()\n\n if edges is not None:\n for edge in edges:\n try:\n self.add_edge(edge[\"from_index\"], edge[\"to_index\"],\n weight=edge.get(\"weight\", None),\n edge_properties=edge.get(\"properties\", None))\n except KeyError:\n raise RuntimeError(\"Some edges are invalid.\")\n\n def set_node_attributes(self):\n \"\"\"\n Replicates molecule site properties (specie, coords, etc.) 
in the\n MoleculeGraph.\n\n :return:\n \"\"\"\n\n species = {}\n coords = {}\n properties = {}\n for node in self.graph.nodes():\n species[node] = self.molecule[node].specie.symbol\n coords[node] = self.molecule[node].coords\n properties[node] = self.molecule[node].properties\n\n nx.set_node_attributes(self.graph, species, \"specie\")\n nx.set_node_attributes(self.graph, coords, \"coords\")\n nx.set_node_attributes(self.graph, properties, \"properties\")\n\n def alter_edge(self, from_index, to_index,\n new_weight=None, new_edge_properties=None):\n \"\"\"\n Alters either the weight or the edge_properties of\n an edge in the MoleculeGraph.\n\n :param from_index: int\n :param to_index: int\n :param new_weight: alter_edge does not require\n that weight be altered. As such, by default, this\n is None. If weight is to be changed, it should be a\n float.\n :param new_edge_properties: alter_edge does not require\n that edge_properties be altered. As such, by default,\n this is None. If any edge properties are to be changed,\n it should be a dictionary of edge properties to be changed.\n :return:\n \"\"\"\n\n existing_edge = self.graph.get_edge_data(from_index, to_index)\n\n # ensure that edge exists before attempting to change it\n if not existing_edge:\n raise ValueError(\"Edge between {} and {} cannot be altered;\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n # Third index should always be 0 because there should only be one edge between any two nodes\n if new_weight is not None:\n self.graph[from_index][to_index][0]['weight'] = new_weight\n\n if new_edge_properties is not None:\n for prop in list(new_edge_properties.keys()):\n self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]\n\n def break_edge(self, from_index, to_index, allow_reverse=False):\n \"\"\"\n Remove an edge from the MoleculeGraph\n\n :param from_index: int\n :param to_index: int\n :param allow_reverse: If allow_reverse is True, then break_edge will\n attempt to break both (from_index, to_index) and, failing that,\n will attempt to break (to_index, from_index).\n :return:\n \"\"\"\n\n # ensure that edge exists before attempting to remove it\n existing_edge = self.graph.get_edge_data(from_index, to_index)\n existing_reverse = None\n\n if existing_edge:\n self.graph.remove_edge(from_index, to_index)\n\n else:\n if allow_reverse:\n existing_reverse = self.graph.get_edge_data(to_index,\n from_index)\n\n if existing_reverse:\n self.graph.remove_edge(to_index, from_index)\n else:\n raise ValueError(\"Edge cannot be broken between {} and {};\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n def remove_nodes(self, indices):\n \"\"\"\n A wrapper for Molecule.remove_sites().\n\n :param indices: list of indices in the current Molecule (and graph) to\n be removed.\n :return:\n \"\"\"\n\n self.molecule.remove_sites(indices)\n self.graph.remove_nodes_from(indices)\n\n mapping = {}\n for correct, current in enumerate(sorted(self.graph.nodes)):\n mapping[current] = correct\n\n nx.relabel_nodes(self.graph, mapping, copy=False)\n self.set_node_attributes()\n\n def split_molecule_subgraphs(self, bonds, allow_reverse=False,\n alterations=None):\n \"\"\"\n Split MoleculeGraph into two or more MoleculeGraphs by\n breaking a set of bonds. 
This function uses\n MoleculeGraph.break_edge repeatedly to create\n disjoint graphs (two or more separate molecules).\n This function not only alters the graph\n information, but also changes the underlying\n Molecules.\n If the bonds parameter does not include sufficient\n bonds to separate two molecule fragments, then this\n function will fail.\n Currently, this function naively assigns the charge\n of the total molecule to a single submolecule. A\n later effort will be to actually accurately assign\n charge.\n NOTE: This function does not modify the original\n MoleculeGraph. It creates a copy, modifies that, and\n returns two or more new MoleculeGraph objects.\n\n :param bonds: list of tuples (from_index, to_index)\n representing bonds to be broken to split the MoleculeGraph.\n :param alterations: a dict {(from_index, to_index): alt},\n where alt is a dictionary including weight and/or edge\n properties to be changed following the split.\n :param allow_reverse: If allow_reverse is True, then break_edge will\n attempt to break both (from_index, to_index) and, failing that,\n will attempt to break (to_index, from_index).\n :return: list of MoleculeGraphs\n \"\"\"\n\n self.set_node_attributes()\n\n original = copy.deepcopy(self)\n\n for bond in bonds:\n original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)\n\n if nx.is_weakly_connected(original.graph):\n raise MolGraphSplitError(\"Cannot split molecule; \\\n MoleculeGraph is still connected.\")\n else:\n\n # alter any bonds before partition, to avoid remapping\n if alterations is not None:\n for (u, v) in alterations.keys():\n if \"weight\" in alterations[(u, v)]:\n weight = alterations[(u, v)][\"weight\"]\n del alterations[(u, v)][\"weight\"]\n edge_properties = alterations[(u, v)] \\\n if len(alterations[(u, v)]) != 0 else None\n original.alter_edge(u, v, new_weight=weight,\n new_edge_properties=edge_properties)\n else:\n original.alter_edge(u, v,\n new_edge_properties=alterations[(u, v)])\n\n sub_mols = []\n\n # Had to use nx.weakly_connected_components because of deprecation\n # of nx.weakly_connected_component_subgraphs\n components = nx.weakly_connected_components(original.graph)\n subgraphs = [original.graph.subgraph(c) for c in components]\n\n for subg in subgraphs:\n\n nodes = sorted(list(subg.nodes))\n\n # Molecule indices are essentially list-based, so node indices\n # must be remapped, incrementing from 0\n mapping = {}\n for i in range(len(nodes)):\n mapping[nodes[i]] = i\n\n # just give charge to whatever subgraph has node with index 0\n # TODO: actually figure out how to distribute charge\n if 0 in nodes:\n charge = self.molecule.charge\n else:\n charge = 0\n\n # relabel nodes in graph to match mapping\n new_graph = nx.relabel_nodes(subg, mapping)\n\n species = nx.get_node_attributes(new_graph, \"specie\")\n coords = nx.get_node_attributes(new_graph, \"coords\")\n raw_props = nx.get_node_attributes(new_graph, \"properties\")\n\n properties = {}\n for prop_set in raw_props.values():\n for prop in prop_set.keys():\n if prop in properties:\n properties[prop].append(prop_set[prop])\n else:\n properties[prop] = [prop_set[prop]]\n\n # Site properties must be present for all atoms in the molecule\n # in order to be used for Molecule instantiation;\n # iterate over a copy so deleting keys is safe in Python 3\n for k, v in list(properties.items()):\n if len(v) != len(species):\n del properties[k]\n\n new_mol = Molecule(species, coords, charge=charge,\n site_properties=properties)\n graph_data = json_graph.adjacency_data(new_graph)\n\n # create new MoleculeGraph\n
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))\n\n return sub_mols\n\n def build_unique_fragments(self):\n \"\"\"\n Find all possible fragment combinations of the MoleculeGraphs (in other\n words, all connected induced subgraphs)\n\n :return:\n \"\"\"\n self.set_node_attributes()\n\n graph = self.graph.to_undirected()\n\n nm = iso.categorical_node_match(\"specie\", \"ERROR\")\n\n # find all possible fragments, aka connected induced subgraphs\n all_fragments = []\n for ii in range(1, len(self.molecule)):\n for combination in combinations(graph.nodes, ii):\n subgraph = nx.subgraph(graph, combination)\n if nx.is_connected(subgraph):\n all_fragments.append(subgraph)\n\n # narrow to all unique fragments using graph isomorphism\n unique_fragments = []\n for fragment in all_fragments:\n if not [nx.is_isomorphic(fragment, f, node_match=nm)\n for f in unique_fragments].count(True) >= 1:\n unique_fragments.append(fragment)\n\n # convert back to molecule graphs\n unique_mol_graphs = []\n for fragment in unique_fragments:\n mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}\n remapped = nx.relabel_nodes(fragment, mapping)\n\n species = nx.get_node_attributes(remapped, \"specie\")\n coords = nx.get_node_attributes(remapped, \"coords\")\n\n edges = {}\n\n for from_index, to_index, key in remapped.edges:\n edge_props = fragment.get_edge_data(from_index, to_index, key=key)\n\n edges[(from_index, to_index)] = edge_props\n\n unique_mol_graphs.append(self.with_edges(Molecule(species=species,\n coords=coords,\n charge=self.molecule.charge),\n edges))\n return unique_mol_graphs\n\n def substitute_group(self, index, func_grp, strategy, bond_order=1,\n graph_dict=None, strategy_params=None, reorder=True,\n extend_structure=True):\n \"\"\"\n Builds off of Molecule.substitute to replace an atom in self.molecule\n with a functional group. This method also amends self.graph to\n incorporate the new functional group.\n\n NOTE: using a MoleculeGraph will generally produce a different graph\n compared with using a Molecule or str (when not using graph_dict).\n This is because of the reordering that occurs when using some of the\n local_env strategies.\n\n :param index: Index of atom to substitute.\n :param func_grp: Substituent molecule. There are three options:\n\n 1. Providing an actual molecule as the input. The first atom\n must be a DummySpecie X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n 3. A MoleculeGraph object.\n :param strategy: Class from pymatgen.analysis.local_env.\n :param bond_order: A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. Defaults to 1.\n :param graph_dict: Dictionary representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. 
If None, then the algorithm\n will attempt to automatically determine bonds using one of\n a list of strategies defined in pymatgen.analysis.local_env.\n :param strategy_params: dictionary of keyword arguments for strategy.\n If None, default parameters will be used.\n :param reorder: bool, representing if graph nodes need to be reordered\n following the application of the local_env strategy\n :param extend_structure: If True (default), then a large artificial box\n will be placed around the Molecule, because some strategies assume\n periodic boundary conditions.\n :return:\n \"\"\"\n\n def map_indices(grp):\n grp_map = {}\n\n # Get indices now occupied by functional group\n # Subtracting 1 because the dummy atom X should not count\n atoms = len(grp) - 1\n offset = len(self.molecule) - atoms\n\n for i in range(atoms):\n grp_map[i] = i + offset\n\n return grp_map\n\n # Work is simplified if a graph is already in place\n if isinstance(func_grp, MoleculeGraph):\n\n self.molecule.substitute(index, func_grp.molecule,\n bond_order=bond_order)\n\n mapping = map_indices(func_grp.molecule)\n\n for (u, v) in list(func_grp.graph.edges()):\n edge_props = func_grp.graph.get_edge_data(u, v)[0]\n weight = None\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n self.add_edge(mapping[u], mapping[v],\n weight=weight, edge_properties=edge_props)\n\n else:\n if isinstance(func_grp, Molecule):\n func_grp = copy.deepcopy(func_grp)\n else:\n try:\n func_grp = copy.deepcopy(FunctionalGroups[func_grp])\n except:\n raise RuntimeError(\"Can't find functional group in list. \"\n \"Provide explicit coordinate instead\")\n\n self.molecule.substitute(index, func_grp, bond_order=bond_order)\n\n mapping = map_indices(func_grp)\n\n # Remove dummy atom \"X\"\n func_grp.remove_species(\"X\")\n\n if graph_dict is not None:\n for (u, v) in graph_dict.keys():\n edge_props = graph_dict[(u, v)]\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n self.add_edge(mapping[u], mapping[v],\n weight=weight, edge_properties=edge_props)\n\n else:\n if strategy_params is None:\n strategy_params = {}\n strat = strategy(**strategy_params)\n graph = self.with_local_env_strategy(func_grp, strat, reorder=reorder,\n extend_structure=extend_structure)\n\n for (u, v) in list(graph.graph.edges()):\n edge_props = graph.graph.get_edge_data(u, v)[0]\n weight = None\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n\n if 0 not in list(graph.graph.nodes()):\n # If graph indices have different indexing\n u, v = (u-1), (v-1)\n\n self.add_edge(mapping[u], mapping[v],\n weight=weight, edge_properties=edge_props)\n\n def replace_group(self, index, func_grp, strategy, bond_order=1,\n graph_dict=None, strategy_params=None, reorder=True,\n extend_structure=True):\n \"\"\"\n Builds off of Molecule.substitute and MoleculeGraph.substitute_group\n to replace a functional group in self.molecule with a functional group.\n This method also amends self.graph to incorporate the new functional\n group.\n\n TODO: Figure out how to replace into a ring structure.\n\n :param index: Index of atom to substitute.\n :param func_grp: Substituent molecule. There are three options:\n\n 1. Providing an actual molecule as the input. The first atom\n must be a DummySpecie X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. 
For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n 3. A MoleculeGraph object.\n :param strategy: Class from pymatgen.analysis.local_env.\n :param bond_order: A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. Defaults to 1.\n :param graph_dict: Dictionary representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. If None, then the algorithm\n will attempt to automatically determine bonds using one of\n a list of strategies defined in pymatgen.analysis.local_env.\n :param strategy_params: dictionary of keyword arguments for strategy.\n If None, default parameters will be used.\n :param reorder: bool, representing if graph nodes need to be reordered\n following the application of the local_env strategy\n :param extend_structure: If True (default), then a large artificial box\n will be placed around the Molecule, because some strategies assume\n periodic boundary conditions.\n :return:\n \"\"\"\n\n self.set_node_attributes()\n neighbors = self.get_connected_sites(index)\n\n # If the atom at index is terminal\n if len(neighbors) == 1:\n self.substitute_group(index, func_grp, strategy,\n bond_order=bond_order, graph_dict=graph_dict,\n strategy_params=strategy_params,\n reorder=reorder,\n extend_structure=extend_structure)\n\n else:\n rings = self.find_rings(including=[index])\n if len(rings) != 0:\n raise RuntimeError(\"Currently functional group replacement\"\n \"cannot occur at an atom within a ring\"\n \"structure.\")\n\n to_remove = set()\n sizes = dict()\n disconnected = self.graph.to_undirected()\n disconnected.remove_node(index)\n for neighbor in neighbors:\n sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))\n\n keep = max(sizes, key=lambda x: sizes[x])\n for i in sizes.keys():\n if i != keep:\n to_remove.add(i)\n\n self.remove_nodes(list(to_remove))\n\n self.substitute_group(index, func_grp, strategy,\n bond_order=bond_order, graph_dict=graph_dict,\n strategy_params=strategy_params,\n reorder=reorder,\n extend_structure=extend_structure)\n\n def find_rings(self, including=None):\n \"\"\"\n Find ring structures in the MoleculeGraph.\n\n :param including: list of site indices. If\n including is not None, then find_rings will\n only return those rings including the specified\n sites. By default, this parameter is None, and\n all rings will be returned.\n :return: dict {index:cycle}. Each\n entry will be a ring (cycle, in graph theory terms) including the index\n found in the Molecule. 
If there is no cycle including an index, the\n value will be an empty list.\n \"\"\"\n\n # Copies self.graph such that all edges (u, v) matched by edges (v, u)\n undirected = self.graph.to_undirected()\n directed = undirected.to_directed()\n\n cycles_nodes = []\n cycles_edges = []\n\n # Remove all two-edge cycles\n all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]\n\n # Using to_directed() will mean that each cycle always appears twice\n # So, we must also remove duplicates\n unique_sorted = []\n unique_cycles = []\n for cycle in all_cycles:\n if sorted(cycle) not in unique_sorted:\n unique_sorted.append(sorted(cycle))\n unique_cycles.append(cycle)\n\n if including is None:\n cycles_nodes = unique_cycles\n else:\n for i in including:\n for cycle in unique_cycles:\n if i in cycle and cycle not in cycles_nodes:\n cycles_nodes.append(cycle)\n\n for cycle in cycles_nodes:\n edges = []\n for i, e in enumerate(cycle):\n edges.append((cycle[i-1], e))\n cycles_edges.append(edges)\n\n return cycles_edges\n\n def get_connected_sites(self, n):\n \"\"\"\n Returns a named tuple of neighbors of site n:\n periodic_site, jimage, index, weight.\n Index is the index of the corresponding site\n in the original structure, weight can be\n None if not defined.\n :param n: index of Site in Molecule\n :param jimage: lattice vector of site\n :return: list of ConnectedSite tuples,\n sorted by closest first\n \"\"\"\n\n connected_sites = set()\n\n out_edges = [(u, v, d) for u, v, d in self.graph.out_edges(n, data=True)]\n in_edges = [(u, v, d) for u, v, d in self.graph.in_edges(n, data=True)]\n\n for u, v, d in out_edges + in_edges:\n\n weight = d.get('weight', None)\n\n if v == n:\n site = self.molecule[u]\n dist = self.molecule[v].distance(self.molecule[u])\n\n connected_site = ConnectedSite(site=site,\n jimage=(0, 0, 0),\n index=u,\n weight=weight,\n dist=dist)\n else:\n site = self.molecule[v]\n dist = self.molecule[u].distance(self.molecule[v])\n\n connected_site = ConnectedSite(site=site,\n jimage=(0, 0, 0),\n index=v,\n weight=weight,\n dist=dist)\n\n connected_sites.add(connected_site)\n\n # return list sorted by closest sites first\n connected_sites = list(connected_sites)\n connected_sites.sort(key=lambda x: x.dist)\n\n return connected_sites\n\n def get_coordination_of_site(self, n):\n \"\"\"\n Returns the number of neighbors of site n.\n In graph terms, simply returns degree\n of node corresponding to site n.\n :param n: index of site\n :return (int):\n \"\"\"\n number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])\n return self.graph.degree(n) - number_of_self_loops\n\n def draw_graph_to_file(self, filename=\"graph\",\n diff=None,\n hide_unconnected_nodes=False,\n hide_image_edges=True,\n edge_colors=False,\n node_labels=False,\n weight_labels=False,\n image_labels=False,\n color_scheme=\"VESTA\",\n keep_dot=False,\n algo=\"fdp\"):\n \"\"\"\n Draws graph using GraphViz.\n\n The networkx graph object itself can also be drawn\n with networkx's in-built graph drawing methods, but\n note that this might give misleading results for\n multigraphs (edges are super-imposed on each other).\n\n If visualization is difficult to interpret,\n `hide_image_edges` can help, especially in larger\n graphs.\n\n :param filename: filename to output, will detect filetype\n from extension (any graphviz filetype supported, such as\n pdf or png)\n :param diff (StructureGraph): an additional graph to\n compare with, will color edges red that do not exist in diff\n and edges green that are in 
diff graph but not in the\n reference graph\n :param hide_unconnected_nodes: if True, hide unconnected\n nodes\n :param hide_image_edges: if True, do not draw edges that\n go through periodic boundaries\n :param edge_colors (bool): if True, use node colors to\n color edges\n :param node_labels (bool): if True, label nodes with\n species and site index\n :param weight_labels (bool): if True, label edges with\n weights\n :param image_labels (bool): if True, label edges with\n their periodic images (usually only used for debugging,\n edges to periodic images always appear as dashed lines)\n :param color_scheme (str): \"VESTA\" or \"JMOL\"\n :param keep_dot (bool): keep GraphViz .dot file for later\n visualization\n :param algo: any graphviz algo, \"neato\" (for simple graphs)\n or \"fdp\" (for more crowded graphs) usually give good outputs\n :return:\n \"\"\"\n\n if not which(algo):\n raise RuntimeError(\"StructureGraph graph drawing requires \"\n \"GraphViz binaries to be in the path.\")\n\n # Developer note: NetworkX also has methods for drawing\n # graphs using matplotlib, these also work here. However,\n # a dedicated tool like GraphViz allows for much easier\n # control over graph appearance and also correctly displays\n # mutli-graphs (matplotlib can superimpose multiple edges).\n\n g = self.graph.copy()\n\n g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': \"false\"}\n\n # add display options for nodes\n for n in g.nodes():\n\n # get label by species name\n label = \"{}({})\".format(str(self.molecule[n].specie), n) if node_labels else \"\"\n\n # use standard color scheme for nodes\n c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])\n\n # get contrasting font color\n # magic numbers account for perceived luminescence\n # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color\n fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587\n + c[2] * 0.114) / 255 < 0.5 else '#ffffff'\n\n # convert color to hex string\n color = \"#{:02x}{:02x}{:02x}\".format(c[0], c[1], c[2])\n\n g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,\n fontname=\"Helvetica-bold\", style=\"filled\", shape=\"circle\")\n\n edges_to_delete = []\n\n # add display options for edges\n for u, v, k, d in g.edges(keys=True, data=True):\n\n # retrieve from/to images, set as origin if not defined\n if \"to_image\" in d:\n to_image = d['to_jimage']\n else:\n to_image = (0, 0, 0)\n\n # set edge style\n d['style'] = \"solid\"\n if to_image != (0, 0, 0):\n d['style'] = \"dashed\"\n if hide_image_edges:\n edges_to_delete.append((u, v, k))\n\n # don't show edge directions\n d['arrowhead'] = \"none\"\n\n # only add labels for images that are not the origin\n if image_labels:\n d['headlabel'] = \"\" if to_image == (0, 0, 0) else \"to {}\".format((to_image))\n d['arrowhead'] = \"normal\" if d['headlabel'] else \"none\"\n\n # optionally color edges using node colors\n color_u = g.node[u]['fillcolor']\n color_v = g.node[v]['fillcolor']\n d['color_uv'] = \"{};0.5:{};0.5\".format(color_u, color_v) if edge_colors else \"#000000\"\n\n # optionally add weights to graph\n if weight_labels:\n units = g.graph.get('edge_weight_units', \"\")\n if d.get('weight'):\n d['label'] = \"{:.2f} {}\".format(d['weight'], units)\n\n # update edge with our new style attributes\n g.edges[u, v, k].update(d)\n\n # optionally remove periodic image edges,\n # these can be confusing due to periodic boundaries\n if hide_image_edges:\n for edge_to_delete in edges_to_delete:\n 
g.remove_edge(*edge_to_delete)\n\n # optionally hide unconnected nodes,\n # these can appear when removing periodic edges\n if hide_unconnected_nodes:\n g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])\n\n # optionally highlight differences with another graph\n if diff:\n diff = self.diff(diff, strict=True)\n green_edges = []\n red_edges = []\n for u, v, k, d in g.edges(keys=True, data=True):\n if (u, v, d['to_jimage']) in diff['self']:\n # edge has been deleted\n red_edges.append((u, v, k))\n elif (u, v, d['to_jimage']) in diff['other']:\n # edge has been added\n green_edges.append((u, v, k))\n for u, v, k in green_edges:\n g.edges[u, v, k].update({'color_uv': '#00ff00'})\n for u, v, k in red_edges:\n g.edges[u, v, k].update({'color_uv': '#ff0000'})\n\n basename, extension = os.path.splitext(filename)\n extension = extension[1:]\n\n write_dot(g, basename+\".dot\")\n\n with open(filename, \"w\") as f:\n\n args = [algo, \"-T\", extension, basename+\".dot\"]\n rs = subprocess.Popen(args,\n stdout=f,\n stdin=subprocess.PIPE, close_fds=True)\n rs.communicate()\n if rs.returncode != 0:\n raise RuntimeError(\"{} exited with return code {}.\".format(algo, rs.returncode))\n\n if not keep_dot:\n os.remove(basename+\".dot\")\n\n def as_dict(self):\n \"\"\"\n As in :Class: `pymatgen.core.Molecule` except\n with using `to_dict_of_dicts` from NetworkX\n to store graph information.\n \"\"\"\n\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"molecule\": self.molecule.as_dict(),\n \"graphs\": json_graph.adjacency_data(self.graph)}\n\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n As in :Class: `pymatgen.core.Molecule` except\n restoring graphs using `from_dict_of_dicts`\n from NetworkX to restore graph information.\n \"\"\"\n m = Molecule.from_dict(d['molecule'])\n return cls(m, d['graphs'])\n\n def _edges_to_string(self, g):\n\n header = \"from to to_image \"\n header_line = \"---- ---- ------------\"\n edge_weight_name = g.graph[\"edge_weight_name\"]\n if edge_weight_name:\n print_weights = [\"weight\"]\n edge_label = g.graph[\"edge_weight_name\"]\n edge_weight_units = g.graph[\"edge_weight_units\"]\n if edge_weight_units:\n edge_label += \" ({})\".format(edge_weight_units)\n header += \" {}\".format(edge_label)\n header_line += \" {}\".format(\"-\"*max([18, len(edge_label)]))\n else:\n print_weights = False\n\n s = header + \"\\n\" + header_line + \"\\n\"\n\n edges = list(g.edges(data=True))\n\n # sort edges for consistent ordering\n edges.sort(key=itemgetter(0, 1))\n\n if print_weights:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12} {:.3e}\\n\".format(u, v, str(data.get(\"to_jimage\", (0, 0, 0))),\n data.get(\"weight\", 0))\n else:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12}\\n\".format(u, v,\n str(data.get(\"to_jimage\", (0, 0, 0))))\n\n return s\n\n def __str__(self):\n s = \"Molecule Graph\"\n s += \"\\nMolecule: \\n{}\".format(self.molecule.__str__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __repr__(self):\n s = \"Molecule Graph\"\n s += \"\\nMolecule: \\n{}\".format(self.molecule.__repr__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __len__(self):\n \"\"\"\n :return: length of Molecule / number of nodes in graph\n \"\"\"\n return len(self.molecule)\n\n def sort(self, key=None, reverse=False):\n \"\"\"\n Same as Molecule.sort(), also remaps nodes in graph.\n :param key:\n :param reverse:\n 
:return:\n \"\"\"\n\n old_molecule = self.molecule.copy()\n\n # sort Molecule\n self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)\n\n # apply Molecule ordering to graph\n mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}\n self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)\n\n # normalize directions of edges\n edges_to_remove = []\n edges_to_add = []\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if v < u:\n new_v, new_u, new_d = u, v, d.copy()\n new_d['to_jimage'] = (0, 0, 0)\n edges_to_remove.append((u, v, k))\n edges_to_add.append((new_u, new_v, new_d))\n\n # add/delete marked edges\n for edges_to_remove in edges_to_remove:\n self.graph.remove_edge(*edges_to_remove)\n for (u, v, d) in edges_to_add:\n self.graph.add_edge(u, v, **d)\n\n def __copy__(self):\n return MoleculeGraph.from_dict(self.as_dict())\n\n def __eq__(self, other):\n \"\"\"\n Two MoleculeGraphs are equal if they have equal Molecules,\n and have the same edges between Sites. Edge weights can be\n different and MoleculeGraphs can still be considered equal.\n\n :param other: MoleculeGraph\n :return (bool):\n \"\"\"\n\n # sort for consistent node indices\n # PeriodicSite should have a proper __hash__() value,\n # using its frac_coords as a convenient key\n try:\n mapping = {tuple(site.coords):self.molecule.index(site) for site in other.molecule}\n except ValueError:\n return False\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])\n\n edges = {(u, v)\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n return (edges == edges_other) and \\\n (self.molecule == other_sorted.molecule)\n\n def isomorphic_to(self, other):\n \"\"\"\n Checks if the graphs of two MoleculeGraphs are isomorphic to one\n another. In order to prevent problems with misdirected edges, both\n graphs are converted into undirected nx.Graph objects.\n\n :param other: MoleculeGraph object to be compared.\n :return: bool\n \"\"\"\n if self.molecule.composition != other.molecule.composition:\n return False\n else:\n self_undir = self.graph.to_undirected()\n other_undir = other.graph.to_undirected()\n nm = iso.categorical_node_match(\"specie\", \"ERROR\")\n isomorphic = nx.is_isomorphic(self_undir, other_undir, node_match=nm)\n return isomorphic\n\n def diff(self, other, strict=True):\n \"\"\"\n Compares two MoleculeGraphs. Returns dict with\n keys 'self', 'other', 'both' with edges that are\n present in only one MoleculeGraph ('self' and\n 'other'), and edges that are present in both.\n\n The Jaccard distance is a simple measure of the\n dissimilarity between two MoleculeGraphs (ignoring\n edge weights), and is defined by 1 - (size of the\n intersection / size of the union) of the sets of\n edges. 
This is returned with key 'dist'.\n\n Important note: all node indices are in terms\n of the MoleculeGraph this method is called\n from, not the 'other' MoleculeGraph: there\n is no guarantee the node indices will be the\n same if the underlying Molecules are ordered\n differently.\n\n :param other: MoleculeGraph\n :param strict: if False, will compare bonds\n from different Molecules, with node indices\n replaced by Specie strings, will not count\n number of occurrences of bonds\n :return:\n \"\"\"\n\n if self.molecule != other.molecule and strict:\n raise ValueError(\"Meaningless to compare MoleculeGraphs if \"\n \"corresponding Molecules are different.\")\n\n if strict:\n # sort for consistent node indices\n # Site should have a proper __hash__() value,\n # using its coords as a convenient key\n mapping = {tuple(site.coords):self.molecule.index(site) for site in other.molecule}\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])\n\n edges = {(u, v, d.get('to_jimage', (0, 0, 0)))\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v, d.get('to_jimage', (0, 0, 0)))\n for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n else:\n\n edges = {(str(self.molecule[u].specie),\n str(self.molecule[v].specie))\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(str(other.molecule[u].specie),\n str(other.molecule[v].specie))\n for u, v, d in other.graph.edges(keys=False, data=True)}\n\n if len(edges) == 0 and len(edges_other) == 0:\n jaccard_dist = 0 # by definition\n else:\n jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))\n\n return {\n 'self': edges - edges_other,\n 'other': edges_other - edges,\n 'both': edges.intersection(edges_other),\n 'dist': jaccard_dist\n }\n" ]
[ [ "numpy.eye", "numpy.multiply", "scipy.spatial.KDTree", "numpy.subtract", "scipy.stats.describe", "numpy.add", "numpy.array_equal", "numpy.array", "numpy.around", "numpy.dot" ] ]
mpopescu/compas
[ "55f259607deea501f862cbaea79bd97d7e56ead6" ]
[ "src/compas/numerical/pca/pca_numpy.py" ]
[ "from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom numpy import asarray\nfrom scipy.linalg import svd\n\n\n__all__ = ['pca_numpy']\n\n\ndef pca_numpy(data):\n \"\"\"Compute the principle components of a set of data points.\n\n Parameters\n ----------\n data : list\n A list of `m` observations, measuring `n` variables.\n For example, if the data are points in 2D space, the data parameter\n should contain `m` nested lists of `2` variables, the `x` and `y`\n coordinates.\n\n Returns\n -------\n tuple\n * The ``mean of the data points``.\n * The principle directions.\n The number of principle directions is equal to the dimensionality of the data.\n For example, if the data points are locations in 3D space, three principle components will be returned.\n If the data points are locations in 2D space, only two principle components will be returned.\n * The *spread* of the data along the principle directions.\n\n Notes\n -----\n PCA of a dataset finds the directions along which the variance of the data\n is largest, i.e. the directions along which the data is most spread out.\n\n Examples\n --------\n >>>\n\n \"\"\"\n X = asarray(data)\n n, dim = X.shape\n\n assert n >= dim, \"The number of observations (n) should be higher than the number of measured variables (dimensions).\"\n\n # the average of the observations for each of the variables\n # for example, if the data are 2D point coordinates,\n # the average is the average of the x-coordinate across all observations\n # and the average of the y-coordinate across all observations\n mean = (X.sum(axis=0) / n).reshape((-1, dim))\n\n # the spread matrix\n # i.e. the variation of each variable compared to the average of the variable\n # across all observations\n Y = X - mean\n\n # covariance matrix of spread\n # note: there is a covariance function in NumPy...\n # the shape of the covariance matrix is dim x dim\n # for example, if the data are 2D point coordinates, the shape of C is 2 x 2\n # the diagonal of the covariance matrix contains the variance of each variable\n # the off-diagonal elements of the covariance matrix contain the covariance\n # of two independent variables\n C = Y.T.dot(Y) / (n - 1)\n\n assert C.shape[0] == dim, \"The shape of the covariance matrix is not correct.\"\n\n # SVD of covariance matrix\n u, s, vT = svd(C, full_matrices=False)\n\n # eigenvectors\n # ------------\n # note: the eigenvectors are normalized\n # note: vT is exactly what it says it will be => the transposed eigenvectors\n # => take the rows of vT, or the columns of v\n # the right-singular vectors of C (the columns of V or the rows of Vt)\n # are the eigenvectors of CtC\n eigenvectors = vT\n\n # eigenvalues\n # -----------\n # the nonzero singular values of C are the square roots\n # of the nonzero eigenvalues of CtC and CCt\n eigenvalues = s\n\n # return\n return mean[0], eigenvectors, eigenvalues\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n\n import doctest\n\n doctest.testmod(globs=globals())\n" ]
[ [ "numpy.asarray", "scipy.linalg.svd" ] ]
dasolhwang/tf-mobilenet-v2
[ "e0e8f936e63e14561d9d25b77256d1cadb85172a" ]
[ "mobilenet_v2.py" ]
[ "\"\"\"\nMobileNet v2.\n\nAs described in https://arxiv.org/abs/1801.04381\n\n Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple, OrderedDict\n\nimport functools\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\nConv = namedtuple('Conv', ['kernel', 'stride', 'channel'])\nInvertedBottleneck = namedtuple('InvertedBottleneck', ['up_sample', 'channel', 'stride', 'repeat'])\n\n# Sequence of layers, described in Table 2\n_CONV_DEFS = [\n Conv(kernel=[3, 3], stride=2, channel=32), # first block, input 224x224x3\n InvertedBottleneck(up_sample=1, channel=16, stride=1, repeat=1), # second block, input : 112x112x32\n InvertedBottleneck(up_sample=6, channel=24, stride=2, repeat=2), # third block, input: 112x112x16\n InvertedBottleneck(up_sample=6, channel=32, stride=2, repeat=3), # fourth block, input: 56x56x24\n InvertedBottleneck(up_sample=6, channel=64, stride=2, repeat=4), # fifth block, input: 28x28x32\n InvertedBottleneck(up_sample=6, channel=96, stride=1, repeat=3), # sixth block, input: 28x28x64\n InvertedBottleneck(up_sample=6, channel=160, stride=2, repeat=3), # seventh block, input: 14x14x96\n InvertedBottleneck(up_sample=6, channel=320, stride=1, repeat=1), # eighth block, input: 7x7x160\n Conv(kernel=[1, 1], stride=1, channel=1280),\n # AvgPool(kernel=[7, 7]),\n # Conv(kernel=[1, 1], stride=1, channel='num_class')\n]\n\n\ndef mobilenet_v2_base(inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=8,\n depth_multiplier=1.0,\n conv_defs=None,\n scope=None):\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\n end_points = OrderedDict()\n\n if conv_defs is None:\n conv_defs = _CONV_DEFS\n\n net = inputs\n with tf.variable_scope(scope, 'MobilenetV2', [inputs]):\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME'):\n for i, conv_def in enumerate(conv_defs):\n\n end_point = ''\n if isinstance(conv_def, Conv):\n end_point = 'Conv2d_%d' % i\n num_channel = depth(conv_def.channel)\n net = slim.conv2d(net, num_channel, conv_def.kernel,\n activation_fn=tf.nn.relu6,\n stride=conv_def.stride,\n scope=end_point)\n end_points[end_point] = net\n elif isinstance(conv_def, InvertedBottleneck):\n stride = conv_def.stride\n\n if conv_def.repeat <= 0:\n raise ValueError('repeat value of inverted bottleneck should be greater than zero.')\n\n for j in range(conv_def.repeat):\n end_point = 'InvertedBottleneck_%d_%d' % (i, j)\n prev_output = net\n net = slim.conv2d(net, conv_def.up_sample * net.get_shape().as_list()[-1], [1, 1],\n activation_fn=tf.nn.relu6,\n scope=end_point + '_inverted_bottleneck')\n end_points[end_point + '_inverted_bottleneck'] = net\n net = slim.separable_conv2d(net, None, [3, 3],\n depth_multiplier=1,\n stride=stride,\n activation_fn=tf.nn.relu6,\n scope=end_point + '_dwise')\n end_points[end_point + '_dwise'] = net\n\n num_channel = depth(conv_def.channel)\n net = slim.conv2d(net, num_channel, [1, 1],\n activation_fn=None,\n scope=end_point + '_linear')\n end_points[end_point + '_linear'] = net\n\n if stride == 1:\n if prev_output.get_shape().as_list()[-1] != net.get_shape().as_list()[-1]:\n # Assumption based on previous ResNet papers: If the number of filters doesn't match,\n # there should be a conv 1x1 operation.\n # 
reference(pytorch) : https://github.com/MG2033/MobileNet-V2/blob/master/layers.py#L29\n prev_output = slim.conv2d(prev_output, num_channel, [1, 1],\n activation_fn=None,\n biases_initializer=None,\n scope=end_point + '_residual_match')\n\n # as described in Figure 4.\n net = tf.add(prev_output, net, name=end_point + '_residual_add')\n end_points[end_point + '_residual_add'] = net\n\n stride = 1\n else:\n raise ValueError('CONV_DEF is not valid.')\n\n if end_point == final_endpoint:\n break\n\n return net, end_points\n\n\ndef mobilenet_v2_cls(inputs,\n num_classes=1000,\n dropout_keep_prob=0.999,\n is_training=True,\n min_depth=8,\n depth_multiplier=1.0,\n conv_defs=None,\n prediction_fn=tf.contrib.layers.softmax,\n reuse=None,\n scope='MobilenetV2'):\n input_shape = inputs.get_shape().as_list()\n if len(input_shape) != 4:\n raise ValueError('Invalid input tensor rank, expected 4, was: %d' %\n len(input_shape))\n\n with tf.variable_scope(scope, 'MobilenetV2', [inputs], reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):\n net, end_points = mobilenet_v2_base(inputs, scope=scope,\n min_depth=min_depth,\n depth_multiplier=depth_multiplier,\n conv_defs=conv_defs)\n with tf.variable_scope('Logits'):\n # class\n if num_classes:\n net = slim.dropout(net, keep_prob=dropout_keep_prob, is_training=is_training, scope='Dropout_1')\n # global pool\n # Issue #1 : https://github.com/ildoonet/tf-mobilenet-v2/issues/1\n net = tf.reduce_mean(net, [1, 2], keepdims=True, name='Global_pool')\n end_points['Global_pool'] = net\n\n # classification\n net = slim.dropout(net, keep_prob=dropout_keep_prob, is_training=is_training, scope='Dropout_2')\n net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='Conv2d_1c_1x1')\n net = slim.flatten(net, scope='Flatten')\n end_points['Logits'] = net\n\n if prediction_fn:\n end_points['Predictions'] = prediction_fn(net, scope='Predictions')\n\n return net, end_points\n\n\ndef wrapped_partial(func, *args, **kwargs):\n partial_func = functools.partial(func, *args, **kwargs)\n functools.update_wrapper(partial_func, func)\n return partial_func\n\n\nmobilenet_v2_cls_075 = wrapped_partial(mobilenet_v2_cls, depth_multiplier=0.75)\nmobilenet_v2_cls_050 = wrapped_partial(mobilenet_v2_cls, depth_multiplier=0.50)\nmobilenet_v2_cls_025 = wrapped_partial(mobilenet_v2_cls, depth_multiplier=0.25)\n\n\ndef mobilenet_v2_arg_scope(is_training=True,\n weight_decay=0.0004,\n stddev=0.01,\n regularize_depthwise=False):\n \"\"\"Defines the default MobilenetV2 arg scope.\n Args:\n is_training: Whether or not we're training the model.\n weight_decay: The weight decay to use for regularizing the model.\n stddev: The standard deviation of the trunctated normal weight initializer.\n regularize_depthwise: Whether or not apply regularization on depthwise.\n Returns:\n An `arg_scope` to use for the mobilenet v2 model.\n \"\"\"\n batch_norm_params = {\n 'is_training': is_training,\n 'center': True,\n 'scale': True,\n 'decay': 0.999,\n 'epsilon': 0.0001,\n 'fused': True,\n 'zero_debias_moving_mean': True\n }\n\n # Set weight_decay for weights in Conv and DepthSepConv layers.\n weights_init = tf.truncated_normal_initializer(stddev=stddev)\n regularizer = tf.contrib.layers.l2_regularizer(weight_decay)\n if regularize_depthwise:\n depthwise_regularizer = regularizer\n else:\n depthwise_regularizer = None\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n weights_initializer=weights_init,\n 
normalizer_fn=slim.batch_norm,\n normalizer_params=batch_norm_params):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):\n with slim.arg_scope([slim.separable_conv2d], weights_regularizer=depthwise_regularizer) as sc:\n return sc\n" ]
[ [ "tensorflow.truncated_normal_initializer", "tensorflow.reduce_mean", "tensorflow.add", "tensorflow.variable_scope", "tensorflow.contrib.layers.l2_regularizer" ] ]
JuliaSprenger/spikeinterface
[ "d5d3d3992a6d430d7008e16db4ee030734e685e5" ]
[ "spikeinterface/widgets/unitlocalization.py" ]
[ "import numpy as np\nimport matplotlib.pylab as plt\nfrom .basewidget import BaseWidget\n\nfrom probeinterface.plotting import plot_probe\n\nfrom spikeinterface.toolkit import compute_unit_centers_of_mass\n\nfrom .utils import get_unit_colors\n\n\nclass UnitLocalizationWidget(BaseWidget):\n \"\"\"\n Plot unit localization on probe.\n\n Parameters\n ----------\n waveform_extractor: WaveformaExtractor\n WaveformaExtractorr object\n peaks: None or numpy array\n Optionally can give already detected peaks\n to avoid multiple computation.\n unit_localisation: None or 2d array\n If None then it is computed with 'method' option\n method: str default 'center_of_mass'\n Method used to estimate unit localization if 'unit_localisation' is None\n method_kwargs: dict\n Option for the method\n unit_colors: None or dict\n A dict key is unit_id and value is any color format handled by matplotlib.\n If None, then the get_unit_colors() is internally used.\n figure: matplotlib figure\n The figure to be used. If not given a figure is created\n ax: matplotlib axis\n The axis to be used. If not given an axis is created\n\n Returns\n -------\n W: ProbeMapWidget\n The output widget\n \"\"\"\n\n def __init__(self, waveform_extractor, unit_localisation=None,\n method='center_of_mass', method_kwargs={'peak_sign': 'neg', 'num_channels': 10},\n unit_colors=None,\n figure=None, ax=None):\n BaseWidget.__init__(self, figure, ax)\n\n self.waveform_extractor = waveform_extractor\n self.unit_localisation = unit_localisation\n self.method = method\n self.method_kwargs = method_kwargs\n\n if unit_colors is None:\n unit_colors = get_unit_colors(waveform_extractor.sorting)\n self.unit_colors = unit_colors\n\n def plot(self):\n we = self.waveform_extractor\n unit_localisation = self.unit_localisation\n unit_ids = we.sorting.unit_ids\n\n if unit_localisation is None:\n assert self.method in ('center_of_mass',)\n\n if self.method == 'center_of_mass':\n coms = compute_unit_centers_of_mass(we, **self.method_kwargs)\n localisation = np.array([e for e in coms.values()])\n else:\n raise ValueError('UnitLocalizationWidget: method not implemented.')\n\n ax = self.ax\n probe = we.recording.get_probe()\n probe_shape_kwargs = dict(facecolor='w', edgecolor='k', lw=0.5, alpha=1.)\n contacts_kargs = dict(alpha=1., edgecolor='k', lw=0.5)\n poly_contact, poly_contour = plot_probe(probe, ax=ax,\n contacts_colors='w', contacts_kargs=contacts_kargs,\n probe_shape_kwargs=probe_shape_kwargs)\n poly_contact.set_zorder(2)\n if poly_contour is not None:\n poly_contour.set_zorder(1)\n\n ax.set_title('')\n\n color = np.array([self.unit_colors[unit_id] for unit_id in unit_ids])\n loc = ax.scatter(localisation[:, 0], localisation[:, 1], marker='1', color=color, s=80, lw=3)\n loc.set_zorder(3)\n\n\ndef plot_unit_localization(*args, **kwargs):\n W = UnitLocalizationWidget(*args, **kwargs)\n W.plot()\n return W\n\n\nplot_unit_localization.__doc__ = UnitLocalizationWidget.__doc__\n" ]
[ [ "numpy.array" ] ]
MRD-Git/Huggingface-course
[ "7c0440584e630cb8885c2a237bc6e8213cfd5572" ]
[ "drop/multilabel_classification/loss.py" ]
[ "# https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.ht\nfrom torch.nn.modules.loss import _Loss\nfrom typing import Optional\nfrom torch import Tensor\nimport torch.nn.functional as F\n\nclass BCEWithLogitsLoss(_Loss):\n r\"\"\"This loss combines a `Sigmoid` layer and the `BCELoss` in one single\n class. This version is more numerically stable than using a plain `Sigmoid`\n followed by a `BCELoss` as, by combining the operations into one layer,\n we take advantage of the log-sum-exp trick for numerical stability.\n\n The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:\n\n .. math::\n \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad\n l_n = - w_n \\left[ y_n \\cdot \\log \\sigma(x_n)\n + (1 - y_n) \\cdot \\log (1 - \\sigma(x_n)) \\right],\n\n where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``\n (default ``'mean'``), then\n\n .. math::\n \\ell(x, y) = \\begin{cases}\n \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\\n \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}\n \\end{cases}\n\n This is used for measuring the error of a reconstruction in for example\n an auto-encoder. Note that the targets `t[i]` should be numbers\n between 0 and 1.\n\n It's possible to trade off recall and precision by adding weights to positive examples.\n In the case of multi-label classification the loss can be described as:\n\n .. math::\n \\ell_c(x, y) = L_c = \\{l_{1,c},\\dots,l_{N,c}\\}^\\top, \\quad\n l_{n,c} = - w_{n,c} \\left[ p_c y_{n,c} \\cdot \\log \\sigma(x_{n,c})\n + (1 - y_{n,c}) \\cdot \\log (1 - \\sigma(x_{n,c})) \\right],\n\n where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification,\n :math:`c = 1` for single-label binary classification),\n :math:`n` is the number of the sample in the batch and\n :math:`p_c` is the weight of the positive answer for the class :math:`c`.\n\n :math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision.\n\n For example, if a dataset contains 100 positive and 300 negative examples of a single class,\n then `pos_weight` for the class should be equal to :math:`\\frac{300}{100}=3`.\n The loss would act as if the dataset contains :math:`3\\times 100=300` positive examples.\n\n Examples::\n\n >>> target = torch.ones([10, 64], dtype=torch.float32) # 64 classes, batch size = 10\n >>> output = torch.full([10, 64], 1.5) # A prediction (logit)\n >>> pos_weight = torch.ones([64]) # All weights are equal to 1\n >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)\n >>> criterion(output, target) # -log(sigmoid(1.5))\n tensor(0.2014)\n\n Args:\n weight (Tensor, optional): a manual rescaling weight given to the loss\n of each batch element. If given, has to be a Tensor of size `nbatch`.\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there are multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when :attr:`reduce` is ``False``. Default: ``True``\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. 
Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``\n pos_weight (Tensor, optional): a weight of positive examples.\n Must be a vector with length equal to the number of classes.\n\n Shape:\n - Input: :math:`(*)`, where :math:`*` means any number of dimensions.\n - Target: :math:`(*)`, same shape as the input.\n - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same\n shape as input.\n\n Examples::\n\n >>> loss = nn.BCEWithLogitsLoss()\n >>> input = torch.randn(3, requires_grad=True)\n >>> target = torch.empty(3).random_(2)\n >>> output = loss(input, target)\n >>> output.backward()\n \"\"\"\n def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean',\n pos_weight: Optional[Tensor] = None) -> None:\n super(BCEWithLogitsLoss, self).__init__(size_average, reduce, reduction)\n self.register_buffer('weight', weight)\n self.register_buffer('pos_weight', pos_weight)\n self.weight: Optional[Tensor]\n self.pos_weight: Optional[Tensor]\n\n def forward(self, inputs, target):\n # Pass the registered buffers through: the original call supplied only\n # self.weight, so pos_weight and reduction were silently ignored.\n return F.binary_cross_entropy_with_logits(inputs, target,\n self.weight,\n pos_weight=self.pos_weight,\n reduction=self.reduction)" ]
[ [ "torch.nn.functional.binary_cross_entropy_with_logits" ] ]
egbQuantum/strawberryfields
[ "674e4fe2de5e5dd791a77f1cd219009120dcbbbf" ]
[ "strawberryfields/backends/states.py" ]
[ "# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\n.. _state_class:\n\nQuantum states API\n========================================================\n\n**Module name:** :mod:`strawberryfields.backends.states`\n\n.. currentmodule:: strawberryfields.backends.states\n\nThis module provides classes which represent the quantum state\nreturned by a simulator backend via :class:`.Engine`.\n\n\nBase quantum state\n-------------------------------\n\nAn abstract base class for the representation of quantum states. This class should not be instantiated on its\nown, instead all states will be represented by one of the inheriting subclasses.\n\nThis class contains all methods that should be supported by all\ninheriting classes.\n\n.. note::\n In the following, keyword arguments are denoted ``**kwargs``, and allow additional\n options to be passed to the underlying State class - these are documented where\n available. For more details on relevant keyword arguments, please\n consult the backend documentation directly.\n\n.. currentmodule:: strawberryfields.backends.states.BaseState\n\n.. autosummary::\n data\n hbar\n is_pure\n num_modes\n mode_names\n mode_indices\n reduced_dm\n fock_prob\n mean_photon\n fidelity\n fidelity_vacuum\n fidelity_coherent\n wigner\n quad_expectation\n poly_quad_expectation\n\n\nBase Gaussian state\n-------------------------------\n\nClass for the representation of quantum states using the Gaussian formalism.\nThis class extends the class :class:`~.BaseState` with additional methods\nunique to Gaussian states.\n\nNote that backends using the Gaussian state representation may extend this class with\nadditional methods particular to the backend, for example :class:`~.GaussianState`\nin the :ref:`gaussian_backend`.\n\n.. currentmodule:: strawberryfields.backends.states.BaseGaussianState\n\n.. autosummary::\n means\n cov\n reduced_gaussian\n is_coherent\n is_squeezed\n displacement\n squeezing\n\n\nBase Fock state\n-------------------------------\n\nClass for the representation of quantum states in the Fock basis.\nThis class extends the class :class:`~.BaseState` with additional methods\nunique to states in the Fock-basis representation.\n\nNote that backends using Fock-basis representation may extend this class with\nadditional methods particular to the backend, for example :class:`~.FockStateTF`\nin the :ref:`Tensorflow_backend`.\n\n.. currentmodule:: strawberryfields.backends.states.BaseFockState\n\n.. autosummary::\n cutoff_dim\n ket\n dm\n trace\n all_fock_probs\n\n.. 
currentmodule:: strawberryfields.backends.states\n\n\nCode details\n~~~~~~~~~~~~\n\n\"\"\"\nimport abc\nimport string\nfrom itertools import chain\nfrom copy import copy\n\nimport numpy as np\nfrom scipy.linalg import block_diag\nfrom scipy.stats import multivariate_normal\nfrom scipy.special import factorial\n\nimport strawberryfields as sf\nfrom .shared_ops import rotation_matrix as _R\nfrom .shared_ops import changebasis\n\nindices = string.ascii_lowercase\n\nclass BaseState(abc.ABC):\n r\"\"\"Abstract base class for the representation of quantum states.\"\"\"\n EQ_TOLERANCE = 1e-10\n\n def __init__(self, num_modes, mode_names=None):\n self._modes = num_modes\n self._hbar = sf.hbar # always use the global frontend hbar value for state objects\n self._data = None\n self._pure = None\n\n if mode_names is None:\n self._modemap = {i:\"mode {}\".format(i) for i in range(num_modes)}\n else:\n self._modemap = {i:'{}'.format(j) for i, j in zip(range(num_modes), mode_names)}\n\n self._str = \"<BaseState: num_modes={}, pure={}, hbar={}>\".format(\n self.num_modes, self._pure, self._hbar)\n\n def __str__(self):\n return self._str\n\n def __repr__(self):\n return self._str\n\n @property\n def data(self):\n r\"\"\"Returns the underlying numerical (or symbolic) representation of the state.\n The form of this data differs for different backends.\"\"\"\n return self._data\n\n @property\n def hbar(self):\n r\"\"\"Returns the value of :math:`\\hbar` used in the generation of the state.\n\n The value of :math:`\\hbar` is a convention chosen in the definition of\n :math:`\\x` and :math:`\\p`. See :ref:`opcon` for more details.\n\n Returns:\n float: :math:`\\hbar` value.\n \"\"\"\n return self._hbar\n\n @property\n def is_pure(self):\n r\"\"\"Checks whether the state is a pure state.\n\n Returns:\n bool: True if and only if the state is pure.\n \"\"\"\n return self._pure\n\n @property\n def num_modes(self):\n r\"\"\"Gets the number of modes that the state represents.\n\n Returns:\n int: the number of modes in the state\n \"\"\"\n return self._modes\n\n @property\n def mode_names(self):\n r\"\"\"Returns a dictionary mapping the mode index to mode names.\n\n The mode names are determined from the initialization argument\n ``mode_names``. If these were not supplied, the names are generated automatically based\n on the mode indices.\n\n Returns:\n dict: dictionary of the form ``{i:\"mode name\",...}``\n \"\"\"\n return self._modemap\n\n @property\n def mode_indices(self):\n r\"\"\"Returns a dictionary mapping the mode names to mode indices.\n\n The mode names are determined from the initialization argument\n ``mode_names``. 
If these were not supplied, the names are generated automatically based\n on the mode indices.\n\n Returns:\n dict: dictionary of the form ``{\"mode name\":i,...}``\n \"\"\"\n return {v: k for k, v in self._modemap.items()}\n\n @abc.abstractmethod\n def reduced_dm(self, modes, **kwargs):\n r\"\"\"Returns a reduced density matrix in the Fock basis.\n\n Args:\n modes (int or Sequence[int]): specifies the mode(s) to return the reduced density matrix for.\n **kwargs:\n\n * **cutoff** (*int*): (default 10) specifies where to truncate the returned density matrix.\n Note that the cutoff argument only applies for Gaussian representation;\n states represented in the Fock basis will use their own internal cutoff dimension.\n\n Returns:\n array: the reduced density matrix for the specified modes\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fock_prob(self, n, **kwargs):\n r\"\"\"Probability of a particular Fock basis state.\n\n Computes the probability :math:`|\\braket{\\vec{n}|\\psi}|^2` of measuring\n the given multi-mode Fock state based on the state :math:`\\ket{\\psi}`.\n\n .. warning::\n\n Computing the Fock probabilities of states has exponential scaling\n in the Gaussian representation (for example states output by a\n Gaussian backend as a :class:`~.BaseGaussianState`).\n This shouldn't affect small-scale problems, where only a few Fock\n basis state probabilities need to be calculated, but will become\n evident in larger scale problems.\n\n Args:\n n (Sequence[int]): the Fock state :math:`\\ket{\\vec{n}}` that we want to measure the probability of\n **kwargs:\n\n * **cutoff** (*int*): (default 10) specifies the fock basis truncation when calculating\n of the fock basis probabilities.\n Note that the cutoff argument only applies for Gaussian representation;\n states represented in the Fock basis will use their own internal cutoff dimension.\n\n Returns:\n float: measurement probability\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def mean_photon(self, mode, **kwargs):\n \"\"\"Returns the mean photon number of a particular mode.\n\n Args:\n mode (int): specifies the mode\n **kwargs:\n\n * **cutoff** (*int*): (default 10) Fock basis trunction for calculation of\n mean photon number.\n Note that the cutoff argument only applies for Gaussian representation;\n states represented in the Fock basis will use their own internal cutoff dimension.\n\n Returns:\n tuple: the mean photon number and variance\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity(self, other_state, mode, **kwargs):\n r\"\"\"Fidelity of the reduced state in the specified mode with a user supplied state.\n Note that this method only supports single-mode states.\n\n Args:\n other_state: a pure state vector array represented in the Fock basis (for Fock backends)\n or a Sequence ``(mu, cov)`` containing the means and covariance matrix (for Gaussian backends)\n\n Returns:\n The fidelity of the circuit state with ``other_state``.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_vacuum(self, **kwargs):\n \"\"\"The fidelity of the state with the vacuum state.\n\n Returns:\n float: the fidelity of the state with the vacuum\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_coherent(self, alpha_list, **kwargs):\n r\"\"\"The fidelity of the state with a product of coherent states.\n\n The fidelity is defined by\n\n .. 
math:: \\bra{\\vec{\\alpha}}\\rho\\ket{\\vec{\\alpha}}\n\n Args:\n alpha_list (Sequence[complex]): list of coherent state parameters, one for each mode\n\n Returns:\n float: the fidelity value\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def wigner(self, mode, xvec, pvec):\n r\"\"\"Calculates the discretized Wigner function of the specified mode.\n\n Args:\n mode (int): the mode to calculate the Wigner function for\n xvec (array): array of discretized :math:`x` quadrature values\n pvec (array): array of discretized :math:`p` quadrature values\n\n Returns:\n array: 2D array of size [len(xvec), len(pvec)], containing reduced Wigner function\n values for specified x and p values.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def quad_expectation(self, mode, phi=0, **kwargs):\n r\"\"\"The :math:`\\x_{\\phi}` operator expectation values and variance for the specified mode.\n\n The :math:`\\x_{\\phi}` operator is defined as follows,\n\n .. math:: \\x_{\\phi} = \\cos\\phi~\\x + \\sin\\phi~\\p\n\n with corresponding expectation value\n\n .. math:: \\bar{x_{\\phi}}=\\langle x_{\\phi}\\rangle = \\text{Tr}(\\x_{\\phi}\\rho_{mode})\n\n and variance\n\n .. math:: \\Delta x_{\\phi}^2 = \\langle x_{\\phi}^2\\rangle - \\braket{x_{\\phi}}^2\n\n Args:\n mode (int): the requested mode\n phi (float): quadrature angle, clockwise from the positive :math:`x` axis.\n\n * :math:`\\phi=0` corresponds to the :math:`x` expectation and variance (default)\n * :math:`\\phi=\\pi/2` corresponds to the :math:`p` expectation and variance\n\n Returns:\n tuple (float, float): expectation value and variance\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs):\n r\"\"\"The multi-mode expectation values and variance of arbitrary 2nd order polynomials\n of quadrature operators.\n\n An arbitrary 2nd order polynomial of quadrature operators over $N$ modes can always\n be written in the following form:\n\n .. math:: P(\\mathbf{r}) = \\mathbf{r}^T A\\mathbf{r} + \\mathbf{r}^T \\mathbf{d} + k I\n\n where:\n\n * :math:`A\\in\\mathbb{R}^{2N\\times 2N}` is a symmetric matrix\n representing the quadratic coefficients,\n * :math:`\\mathbf{d}\\in\\mathbb{R}^{2N}` is a real vector representing\n the linear coefficients,\n * :math:`k\\in\\mathbb{R}` represents the constant term, and\n * :math:`\\mathbf{r} = (\\x_1,\\dots,\\x_N,\\p_1,\\dots,\\p_N)` is the vector\n of quadrature operators in :math:`xp`-ordering.\n\n This method returns the expectation value of this second-order polynomial,\n\n .. math:: \\langle P(\\mathbf{r})\\rangle,\n\n as well as the variance\n\n .. math:: \\Delta P(\\mathbf{r})^2 = \\braket{P(\\mathbf{r})^2} - \\braket{P(\\mathbf{r})}^2\n\n Args:\n A (array): a real symmetric 2Nx2N NumPy array, representing the quadratic\n coefficients of the second order quadrature polynomial.\n d (array): a real length-2N NumPy array, representing the linear\n coefficients of the second order quadrature polynomial. Defaults to the zero vector.\n k (float): the constant term. Default 0.\n phi (float): quadrature angle, clockwise from the positive :math:`x` axis. 
If provided,\n the vector of quadrature operators :math:`\\mathbf{r}` is first rotated\n by angle :math:`\\phi` in the phase space.\n\n\n Returns:\n tuple (float, float): expectation value and variance\n \"\"\"\n raise NotImplementedError\n\n\nclass BaseFockState(BaseState):\n r\"\"\"Class for the representation of quantum states in the Fock basis.\n\n Args:\n state_data (array): the state representation in the Fock basis\n num_modes (int): the number of modes in the state\n pure (bool): True if the state is a pure state, false if the state is mixed\n cutoff_dim (int): the Fock basis truncation size\n mode_names (Sequence): (optional) this argument contains a list providing mode names\n for each mode in the state\n \"\"\"\n\n def __init__(self, state_data, num_modes, pure, cutoff_dim, mode_names=None):\n # pylint: disable=too-many-arguments\n\n super().__init__(num_modes, mode_names)\n\n self._data = state_data\n self._cutoff = cutoff_dim\n self._pure = pure\n self._basis = 'fock'\n\n self._str = \"<FockState: num_modes={}, cutoff={}, pure={}, hbar={}>\".format(\n self.num_modes, self._cutoff, self._pure, self._hbar)\n\n def __eq__(self, other):\n \"\"\"Equality operator for BaseFockState.\n\n Returns True if other BaseFockState is close to self.\n This is done by comparing the dm attribute - if within\n the EQ_TOLERANCE, True is returned.\n\n Args:\n other (BaseFockState): BaseFockState to compare against.\n \"\"\"\n if not isinstance(other, type(self)):\n return False\n\n if self.num_modes != other.num_modes:\n return False\n\n if self.data.shape != other.data.shape:\n return False\n\n if np.allclose(self.dm(), other.dm(), atol=self.EQ_TOLERANCE, rtol=0):\n return True\n\n return False\n\n @property\n def cutoff_dim(self):\n r\"\"\"The numerical truncation of the Fock space used by the underlying state.\n Note that a cutoff of D corresponds to the Fock states :math:`\\{|0\\rangle,\\dots,|D-1\\rangle\\}`\n\n Returns:\n int: the cutoff dimension\n \"\"\"\n return self._cutoff\n\n def ket(self, **kwargs):\n r\"\"\"The numerical state vector for the quantum state.\n Note that if the state is mixed, this method returns None.\n\n Returns:\n array/None: the numerical state vector. 
Returns None if the state is mixed.\n \"\"\"\n # pylint: disable=unused-argument\n if self._pure:\n return self.data\n\n return None # pragma: no cover\n\n def dm(self, **kwargs):\n r\"\"\"The numerical density matrix for the quantum state.\n\n Returns:\n array: the numerical density matrix in the Fock basis\n \"\"\"\n # pylint: disable=unused-argument\n if self._pure:\n left_str = [indices[i] for i in range(0, 2 * self._modes, 2)]\n right_str = [indices[i] for i in range(1, 2 * self._modes, 2)]\n out_str = [indices[: 2 * self._modes]]\n einstr = ''.join(left_str + [','] + right_str + ['->'] + out_str)\n rho = np.einsum(einstr, self.ket(), self.ket().conj())\n return rho\n\n return self.data\n\n def trace(self, **kwargs):\n r\"\"\"Trace of the density operator corresponding to the state.\n\n For pure states the trace corresponds to the squared norm of the ket vector.\n\n For physical states this should always be 1, any deviations from this value are due\n to numerical errors and Hilbert space truncation artefacts.\n\n Returns:\n float: trace of the state\n \"\"\"\n # pylint: disable=unused-argument\n if self.is_pure:\n return np.vdot(self.ket(), self.ket()).real # <s|s>\n\n # need some extra steps to trace over multimode matrices\n eqn_indices = [[indices[idx]] * 2 for idx in range(self._modes)] #doubled indices [['i','i'],['j','j'], ... ]\n eqn = \"\".join(chain.from_iterable(eqn_indices)) # flatten indices into a single string 'iijj...'\n return np.einsum(eqn, self.dm()).real\n\n def all_fock_probs(self, **kwargs):\n r\"\"\"Probabilities of all possible Fock basis states for the current circuit state.\n\n For example, in the case of 3 modes, this method allows the Fock state probability\n :math:`|\\braketD{0,2,3}{\\psi}|^2` to be returned via\n\n .. 
code-block:: python\n\n probs = state.all_fock_probs()\n probs[0,2,3]\n\n Returns:\n array: array of dimension :math:`\\underbrace{D\\times D\\times D\\cdots\\times D}_{\\text{num modes}}`\n containing the Fock state probabilities, where :math:`D` is the Fock basis cutoff truncation\n \"\"\"\n # pylint: disable=unused-argument\n if self._pure:\n s = np.ravel(self.ket()) # into 1D array\n return np.reshape((s * s.conj()).real, [self._cutoff]*self._modes)\n\n s = self.dm()\n num_axes = len(s.shape)\n evens = [k for k in range(0, num_axes, 2)]\n odds = [k for k in range(1, num_axes, 2)]\n flat_size = np.prod([s.shape[k] for k in range(0, num_axes, 2)])\n transpose_list = evens + odds\n probs = np.diag(np.reshape(np.transpose(s, transpose_list), [flat_size, flat_size])).real\n\n return np.reshape(probs, [self._cutoff]*self._modes)\n\n #=====================================================\n # the following methods are overwritten from BaseState\n\n def reduced_dm(self, modes, **kwargs):\n # pylint: disable=unused-argument\n if modes == list(range(self._modes)):\n # reduced state is full state\n return self.dm() # pragma: no cover\n\n if isinstance(modes, int):\n modes = [modes]\n if modes != sorted(modes):\n raise ValueError(\"The specified modes cannot be duplicated.\")\n\n if len(modes) > self._modes:\n raise ValueError(\"The number of specified modes cannot \"\n \"be larger than the number of subsystems.\")\n\n # reduce rho down to specified subsystems\n keep_indices = indices[: 2 * len(modes)]\n trace_indices = indices[2 * len(modes) : len(modes) + self._modes]\n\n ind = [i * 2 for i in trace_indices]\n ctr = 0\n\n for m in range(self._modes):\n if m in modes:\n ind.insert(m, keep_indices[2 * ctr : 2 * (ctr + 1)])\n ctr += 1\n\n indStr = ''.join(ind) + '->' + keep_indices\n return np.einsum(indStr, self.dm())\n\n def fock_prob(self, n, **kwargs):\n # pylint: disable=unused-argument\n if len(n) != self._modes:\n raise ValueError(\"List length should be equal to number of modes\")\n\n elif max(n) >= self._cutoff:\n raise ValueError(\"Can't get distribution beyond truncation level\")\n\n if self._pure:\n return np.abs(self.ket()[tuple(n)])**2\n\n return self.dm()[tuple([n[i//2] for i in range(len(n)*2)])].real\n\n def mean_photon(self, mode, **kwargs):\n # pylint: disable=unused-argument\n n = np.arange(self._cutoff)\n probs = np.diagonal(self.reduced_dm(mode))\n mean = np.sum(n*probs).real\n var = np.sum(n**2*probs).real - mean**2\n return mean, var\n\n def fidelity(self, other_state, mode, **kwargs):\n # pylint: disable=unused-argument\n max_indices = len(indices) // 2\n\n if self.num_modes > max_indices:\n raise Exception(\"fidelity method can only support up to {} modes\".format(max_indices))\n\n left_indices = indices[:mode]\n eqn_left = \"\".join([i*2 for i in left_indices])\n reduced_dm_indices = indices[mode:mode + 2]\n right_indices = indices[mode + 2:self._modes + 1]\n eqn_right = \"\".join([i*2 for i in right_indices])\n eqn = \"\".join([eqn_left, reduced_dm_indices, eqn_right]) + \"->\" + reduced_dm_indices\n rho_reduced = np.einsum(eqn, self.dm())\n\n return np.dot(np.conj(other_state), np.dot(rho_reduced, other_state)).real\n\n def fidelity_vacuum(self, **kwargs):\n # pylint: disable=unused-argument\n alpha = np.zeros(self._modes)\n return self.fidelity_coherent(alpha)\n\n def fidelity_coherent(self, alpha_list, **kwargs):\n # pylint: disable=too-many-locals,unused-argument\n if self.is_pure:\n mode_size = 1\n s = self.ket()\n else:\n mode_size = 2\n s = self.dm()\n\n if not 
hasattr(alpha_list, \"__len__\"):\n alpha_list = [alpha_list] # pragma: no cover\n\n if len(alpha_list) != self._modes:\n raise ValueError(\"The number of alpha values must match the number of modes.\")\n\n coh = lambda a, dim: np.array(\n [np.exp(-0.5 * np.abs(a) ** 2) * (a) ** n / np.sqrt(factorial(n)) for n in range(dim)]\n )\n\n if self._modes == 1:\n multi_cohs_vec = coh(alpha_list[0], self._cutoff)\n else:\n multi_cohs_list = [coh(alpha_list[idx], dim) for idx, dim in enumerate(s.shape[::mode_size])]\n eqn = \",\".join(indices[:self._modes]) + \"->\" + indices[:self._modes]\n multi_cohs_vec = np.einsum(eqn, *multi_cohs_list) # tensor product of specified coherent states\n\n if self.is_pure:\n ovlap = np.vdot(multi_cohs_vec, s)\n return np.abs(ovlap) ** 2\n\n bra_indices = indices[:2 * self._modes:2]\n ket_indices = indices[1:2 * self._modes:2]\n new_eqn_lhs = \",\".join([bra_indices, ket_indices])\n new_eqn_rhs = \"\".join(bra_indices[idx] + ket_indices[idx] for idx in range(self._modes))\n outer_prod_eqn = new_eqn_lhs + \"->\" + new_eqn_rhs\n multi_coh_matrix = np.einsum(outer_prod_eqn, multi_cohs_vec, np.conj(multi_cohs_vec))\n\n return np.vdot(s, multi_coh_matrix).real\n\n def wigner(self, mode, xvec, pvec):\n r\"\"\"Calculates the discretized Wigner function of the specified mode.\n\n .. note::\n\n This code is a modified version of the 'iterative' method of the\n `wigner function provided in QuTiP <http://qutip.org/docs/4.0.2/apidoc/functions.html?highlight=wigner#qutip.wigner.wigner>`_,\n which is released under the BSD license, with the following\n copyright notice:\n\n Copyright (C) 2011 and later, P.D. Nation, J.R. Johansson,\n A.J.G. Pitchford, C. Granade, and A.L. Grimsmo. All rights reserved.\n\n Args:\n mode (int): the mode to calculate the Wigner function for\n xvec (array): array of discretized :math:`x` quadrature values\n pvec (array): array of discretized :math:`p` quadrature values\n\n Returns:\n array: 2D array of size [len(xvec), len(pvec)], containing reduced Wigner function\n values for specified x and p values.\n \"\"\"\n rho = self.reduced_dm(mode)\n Q, P = np.meshgrid(xvec, pvec)\n A = (Q + P * 1.0j) / (2*np.sqrt(self._hbar/2))\n\n Wlist = np.array([np.zeros(np.shape(A), dtype=complex) for k in range(self._cutoff)])\n\n # Wigner function for |0><0|\n Wlist[0] = np.exp(-2.0 * np.abs(A)**2) / np.pi\n\n # W = rho(0,0)W(|0><0|)\n W = np.real(rho[0, 0]) * np.real(Wlist[0])\n\n for n in range(1, self._cutoff):\n Wlist[n] = (2.0 * A * Wlist[n - 1]) / np.sqrt(n)\n W += 2 * np.real(rho[0, n] * Wlist[n])\n\n for m in range(1, self._cutoff):\n temp = copy(Wlist[m])\n # Wlist[m] = Wigner function for |m><m|\n Wlist[m] = (2 * np.conj(A) * temp - np.sqrt(m)\n * Wlist[m - 1]) / np.sqrt(m)\n\n # W += rho(m,m)W(|m><m|)\n W += np.real(rho[m, m] * Wlist[m])\n\n for n in range(m + 1, self._cutoff):\n temp2 = (2 * A * Wlist[n - 1] - np.sqrt(m) * temp) / np.sqrt(n)\n temp = copy(Wlist[n])\n # Wlist[n] = Wigner function for |m><n|\n Wlist[n] = temp2\n\n # W += rho(m,n)W(|m><n|) + rho(n,m)W(|n><m|)\n W += 2 * np.real(rho[m, n] * Wlist[n])\n\n return W / (self._hbar)\n\n def quad_expectation(self, mode, phi=0, **kwargs):\n a = np.diag(np.sqrt(np.arange(1, self._cutoff+5)), 1)\n x = np.sqrt(self._hbar/2) * (a + a.T)\n p = -1j * np.sqrt(self._hbar/2) * (a - a.T)\n\n xphi = np.cos(phi)*x + np.sin(phi)*p\n xphisq = np.dot(xphi, xphi)\n\n # truncate down\n xphi = xphi[:self._cutoff, :self._cutoff]\n xphisq = xphisq[:self._cutoff, :self._cutoff]\n\n rho = self.reduced_dm(mode)\n\n mean = 
np.trace(np.dot(xphi, rho)).real\n var = np.trace(np.dot(xphisq, rho)).real - mean**2\n\n return mean, var\n\n def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs):\n # pylint: disable=too-many-branches\n\n if A is None:\n A = np.zeros([2*self._modes, 2*self._modes])\n\n if A.shape != (2*self._modes, 2*self._modes):\n raise ValueError(\"Matrix of quadratic coefficients A must be of size 2Nx2N.\")\n\n if not np.allclose(A.T, A):\n raise ValueError(\"Matrix of quadratic coefficients A must be symmetric.\")\n\n if d is None:\n linear_coeff = np.zeros([2*self._modes])\n else:\n linear_coeff = d.copy()\n linear_coeff[self._modes:] = -d[self._modes:]\n\n if linear_coeff.shape != (2*self._modes,):\n raise ValueError(\"Vector of linear coefficients d must be of length 2N.\")\n\n # expand the cutoff dimension in approximating the x and p\n # operators in the Fock basis, to reduce numerical inaccuracy.\n worksize = 1\n dim = self._cutoff + worksize\n\n # construct the x and p operators\n a = np.diag(np.sqrt(np.arange(1, dim)), 1)\n x_ = np.sqrt(self._hbar/2) * (a + a.T)\n p_ = -1j * np.sqrt(self._hbar/2) * (a - a.T)\n\n if phi != 0:\n # rotate the quadrature operators\n x = np.cos(phi)*x_ - np.sin(phi)*p_\n p = np.sin(phi)*x_ + np.cos(phi)*p_\n else:\n x = x_\n p = p_\n\n def expand_dims(op, n, modes):\n \"\"\"Expand quadrature operator to act on nth mode\"\"\"\n I = np.identity(dim)\n allowed_indices = zip(indices[:2*modes:2], indices[1:2*modes:2])\n ind = ','.join(a+b for a, b in allowed_indices)\n ops = [I]*n + [op] + [I]*(modes-n-1)\n # the einsum 'ij,kl,mn->ijklmn' (for 3 modes)\n return np.einsum(ind, *ops)\n\n # determine modes with quadratic expectation values\n nonzero = np.concatenate([np.mod(A.nonzero()[0], self._modes), np.mod(linear_coeff.nonzero()[0], self._modes)])\n ex_modes = list(set(nonzero))\n num_modes = len(ex_modes)\n\n if not ex_modes:\n # only a constant term was provided\n return k, 0.\n\n # There are non-zero elements of A and/or d\n # therefore there are quadratic and/or linear terms.\n # find the reduced density matrix\n rho = self.reduced_dm(modes=ex_modes)\n\n # generate vector of quadrature operators\n # this array will have shape [2*num_modes] + [dim]*(2*num_modes)\n r = np.empty([2*num_modes] + [dim]*(2*num_modes), dtype=np.complex128)\n for n in range(num_modes):\n r[n] = expand_dims(x, n, num_modes)\n r[num_modes+n] = expand_dims(p, n, num_modes)\n\n # reduce the size of A so that we only consider modes\n # which we need to calculate the expectation value for\n rows = ex_modes + [i+self._modes for i in ex_modes]\n quad_coeffs = A[:, rows][rows]\n quad_coeffs[num_modes:, :num_modes] = -quad_coeffs[num_modes:, :num_modes]\n quad_coeffs[:num_modes, num_modes:] = -quad_coeffs[:num_modes, num_modes:]\n\n # Compute the polynomial\n #\n # For 3 modes, this gives the einsum (with brackets denoting modes):\n # 'a(bc)(de)(fg),a(ch)(ei)(gj)->(bh)(di)(fj)' applied to r, A@r\n #\n # a corresponds to the index in the vector of quadrature operators\n # r = (x_1,...,x_n,p_1,...,p_n), and the remaining indices ijklmn\n # are the elements of the operator acting on a 3 mode density matrix.\n #\n # So, in effect, matrix of quadratic coefficients A acts only on index a,\n # this index is then summed, and then each mode of r, A@r undergoes\n # matrix multiplication\n ind1 = indices[:2*num_modes+1]\n ind2 = ind1[0] + ''.join([str(i)+str(j) for i, j in zip(ind1[2::2], indices[2*num_modes+1:3*num_modes+1])])\n ind3 = ''.join([str(i)+str(j) for i, j in zip(ind1[1::2], 
ind2[2::2])])\n ind = \"{},{}->{}\".format(ind1, ind2, ind3)\n\n if np.allclose(quad_coeffs, 0.):\n poly_op = np.zeros([dim]*(2*num_modes), dtype=np.complex128)\n else:\n # Einsum above applied to r, A@r\n # This einsum sums over all quadrature operators, and also applies matrix\n # multiplication between the same mode of each operator\n poly_op = np.einsum(ind, r, np.tensordot(quad_coeffs, r, axes=1)).conj()\n\n # add linear term\n rows = np.flip(np.array(rows).reshape([2, -1]), axis=1).flatten()\n poly_op += r.T @ linear_coeff[rows]\n\n # add constant term\n if k != 0:\n poly_op += k*expand_dims(np.eye(dim), 0, num_modes)\n\n # calculate Op^2\n ind = \"{},{}->{}\".format(ind1[1:], ind2[1:], ind3)\n poly_op_sq = np.einsum(ind, poly_op, poly_op)\n\n # truncate down\n sl = tuple([slice(0, self._cutoff)]*(2*num_modes))\n poly_op = poly_op[sl]\n poly_op_sq = poly_op_sq[sl]\n\n ind1 = ind1[:-1]\n ind2 = ''.join([str(j)+str(i) for i, j in zip(ind1[::2], ind1[1::2])])\n ind = \"{},{}\".format(ind1, ind2)\n\n # calculate expectation value, Tr(Op @ rho)\n # For 3 modes, this gives the einsum '(ab)(cd)(ef),(ba)(dc)(fe)->'\n mean = np.einsum(ind, poly_op, rho).real\n\n # calculate variance Tr(Op^2 @ rho) - Tr(Op @ rho)^2\n var = np.einsum(ind, poly_op_sq, rho).real - mean**2\n\n return mean, var\n\n\nclass BaseGaussianState(BaseState):\n r\"\"\"Class for the representation of quantum states using the Gaussian formalism.\n\n Note that this class uses the Gaussian representation convention\n\n .. math:: \\bar{\\mathbf{r}} = (\\bar{x}_1,\\bar{x}_2,\\dots,\\bar{x}_N,\\bar{p}_1,\\dots,\\bar{p}_N)\n\n Args:\n state_data (tuple(mu, cov)): A tuple containing the vector of means array ``mu`` and the\n covariance matrix array ``cov``, in terms of the complex displacement.\n num_modes (int): the number of modes in the state\n mode_names (Sequence): (optional) this argument contains a list providing mode names\n for each mode in the state\n \"\"\"\n def __init__(self, state_data, num_modes, mode_names=None):\n super().__init__(num_modes, mode_names)\n\n self._data = state_data\n\n # vector of means and covariance matrix, using frontend x,p scaling\n self._mu = self._data[0] * np.sqrt(self._hbar/2)\n self._cov = self._data[1] * (self._hbar/2)\n # complex displacements of the Gaussian state\n self._alpha = self._mu[:self._modes] + 1j*self._mu[self._modes:]\n self._alpha /= np.sqrt(2*self._hbar)\n\n # purity is computed from the covariance determinant, not passed in\n self._pure = np.abs(np.linalg.det(self._cov) - (self._hbar/2)**(2*self._modes)) < self.EQ_TOLERANCE\n\n self._basis = 'gaussian'\n self._str = \"<GaussianState: num_modes={}, pure={}, hbar={}>\".format(\n self.num_modes, self._pure, self._hbar)\n\n def __eq__(self, other):\n \"\"\"Equality operator for BaseGaussianState.\n\n Returns True if other BaseGaussianState is close to self.\n This is done by comparing the means vector and cov matrix.\n If both are within the EQ_TOLERANCE, True is returned.\n\n Args:\n other (BaseGaussianState): BaseGaussianState to compare against.\n \"\"\"\n #pylint: disable=protected-access\n if not isinstance(other, type(self)):\n return False\n\n if self.num_modes != other.num_modes:\n return False\n\n if np.allclose(self._mu, other._mu, atol=self.EQ_TOLERANCE, rtol=0) and \\\n np.allclose(self._cov, other._cov, atol=self.EQ_TOLERANCE, rtol=0):\n return True\n\n return False\n\n def means(self):\n r\"\"\"The vector of means describing the Gaussian state.\n\n For an :math:`N` mode state, this has the form\n\n .. 
math::\n \\bar{\\mathbf{r}} = \\left(\\bar{x}_0,\\dots,\\bar{x}_{N-1},\\bar{p}_0,\\dots,\\bar{p}_{N-1}\\right)\n\n where :math:`\\bar{x}_i` and :math:`\\bar{p}_i` refer to the mean\n position and momentum quadrature of mode :math:`i` respectively.\n\n Returns:\n array: a length :math:`2N` array containing the vector of means.\n \"\"\"\n return self._mu\n\n def cov(self):\n r\"\"\"The covariance matrix describing the Gaussian state.\n\n The diagonal elements of the covariance matrix correspond to the\n variance in the position and momentum quadratures:\n\n .. math::\n \\mathbf{V}_{ii} = \\begin{cases}\n (\\Delta x_i)^2, & 0\\leq i\\leq N-1\\\\\n (\\Delta p_{i-N})^2, & N\\leq i\\leq 2N-1\n \\end{cases}\n\n where :math:`\\Delta x_i` and :math:`\\Delta p_i` refer to the\n position and momentum quadrature variance of mode :math:`i` respectively.\n\n Note that if the covariance matrix is purely diagonal, then this\n corresponds to squeezing :math:`z=re^{i\\phi}` where :math:`\\phi=0`,\n and :math:`\\Delta x_i = e^{-2r}`, :math:`\\Delta p_i = e^{2r}`.\n\n Returns:\n array: the :math:`2N\\times 2N` covariance matrix.\n \"\"\"\n return self._cov\n\n def reduced_gaussian(self, modes):\n r\"\"\" Returns the vector of means and the covariance matrix of the specified modes.\n\n Args:\n modes (int or Sequence[int]): indices of the requested modes\n\n Returns:\n tuple (means, cov): where means is an array containing the vector of means,\n and cov is a square array containing the covariance matrix.\n \"\"\"\n if modes == list(range(self._modes)):\n # reduced state is full state\n return self._mu, self._cov\n\n # reduce rho down to specified subsystems\n if isinstance(modes, int):\n modes = [modes]\n\n if modes != sorted(modes):\n raise ValueError(\"The specified modes cannot be duplicated.\")\n\n if len(modes) > self._modes:\n raise ValueError(\"The number of specified modes cannot \"\n \"be larger than the number of subsystems.\")\n\n ind = np.concatenate([np.array(modes), np.array(modes)+self._modes])\n rows = ind.reshape(-1, 1)\n cols = ind.reshape(1, -1)\n\n mu = self._mu[ind]\n cov = self._cov[rows, cols]\n\n return mu, cov\n\n def is_coherent(self, mode, tol=1e-10):\n r\"\"\"Returns True if the Gaussian state of a particular mode is a coherent state.\n\n Args:\n mode (int): the specified mode\n tol (float): the numerical precision in determining if squeezing is not present\n\n Returns:\n bool: True if and only if the state is a coherent state.\n \"\"\"\n mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable\n cov /= self._hbar/2\n return np.allclose(cov, np.identity(2), atol=tol, rtol=0)\n\n def displacement(self, modes=None):\n r\"\"\"Returns the displacement parameter :math:`\\alpha` of the modes specified.\n\n Args:\n modes (int or Sequence[int]): modes specified\n\n Returns:\n Sequence[complex]: sequence of complex displacements :math:`\\alpha`\n corresponding to the list of specified modes\n \"\"\"\n if modes is None:\n modes = list(range(self._modes))\n elif isinstance(modes, int): # pragma: no cover\n modes = [modes]\n\n return self._alpha[list(modes)]\n\n def is_squeezed(self, mode, tol=1e-6):\n r\"\"\"Returns True if the Gaussian state of a particular mode is a squeezed state.\n\n Args:\n mode (int): the specified mode\n tol (float): the numerical precision in determining if squeezing is present\n\n Returns:\n bool: True if and only if the state is a squeezed state.\n \"\"\"\n mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable\n cov /= 
self._hbar/2\n return np.any(np.abs(cov - np.identity(2)) > tol)\n\n def squeezing(self, modes=None):\n r\"\"\"Returns the squeezing parameters :math:`(r,\\phi)` of the modes specified.\n\n Args:\n modes (int or Sequence[int]): modes specified\n\n Returns:\n List[(float, float)]: sequence of tuples containing the squeezing\n parameters :math:`(r,\\phi)` of the specified modes.\n \"\"\"\n if modes is None:\n modes = list(range(self._modes))\n elif isinstance(modes, int): # pragma: no cover\n modes = [modes]\n\n res = []\n for i in modes:\n mu, cov = self.reduced_gaussian([i]) # pylint: disable=unused-variable\n cov /= self._hbar/2\n tr = np.trace(cov)\n\n r = np.arccosh(tr/2)/2\n\n if cov[0, 1] == 0.:\n phi = 0\n else:\n phi = -np.arcsin(2*cov[0, 1] / np.sqrt((tr-2)*(tr+2)))\n\n res.append((r, phi))\n\n return res\n\n #=====================================================\n # the following methods are overwritten from BaseState\n\n def wigner(self, mode, xvec, pvec):\n mu, cov = self.reduced_gaussian([mode])\n\n X, P = np.meshgrid(xvec, pvec)\n grid = np.empty(X.shape + (2,))\n grid[:, :, 0] = X\n grid[:, :, 1] = P\n mvn = multivariate_normal(mu, cov, allow_singular=True)\n\n return mvn.pdf(grid)\n\n def quad_expectation(self, mode, phi=0, **kwargs):\n # pylint: disable=unused-argument\n mu, cov = self.reduced_gaussian([mode])\n rot = _R(phi)\n\n muphi = rot.T @ mu\n covphi = rot.T @ cov @ rot\n return (muphi[0], covphi[0, 0])\n\n def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs):\n if A is None:\n A = np.zeros([2*self._modes, 2*self._modes])\n\n if A.shape != (2*self._modes, 2*self._modes):\n raise ValueError(\"Matrix of quadratic coefficients A must be of size 2Nx2N.\")\n\n if not np.allclose(A.T, A):\n raise ValueError(\"Matrix of quadratic coefficients A must be symmetric.\")\n\n if d is not None:\n if d.shape != (2*self._modes,):\n raise ValueError(\"Vector of linear coefficients d must be of length 2N.\")\n else:\n d = np.zeros([2*self._modes])\n\n # determine modes with quadratic expectation values\n nonzero = np.concatenate([np.mod(A.nonzero()[0], self._modes), np.mod(d.nonzero()[0], self._modes)])\n ex_modes = list(set(nonzero))\n\n # reduce the size of A so that we only consider modes\n # which we need to calculate the expectation value for\n rows = ex_modes + [i+self._modes for i in ex_modes]\n num_modes = len(ex_modes)\n quad_coeffs = A[:, rows][rows]\n\n if not ex_modes:\n # only a constant term was provided\n return k, 0.\n\n mu = self._mu\n cov = self._cov\n\n if phi != 0:\n # rotate all modes of the covariance matrix and vector of means\n R = _R(phi)\n C = changebasis(self._modes)\n rot = C.T @ block_diag(*([R]*self._modes)) @ C\n\n mu = rot.T @ mu\n cov = rot.T @ cov @ rot\n\n # transform to the expectation of a quadratic on a normal distribution with zero mean\n # E[P(r)]_(mu,cov) = E(Q(r+mu)]_(0,cov)\n # = E[rT.A.r + rT.(2A.mu+d) + (muT.A.mu+muT.d+cI)]_(0,cov)\n # = E[rT.A.r + rT.d' + k']_(0,cov)\n d2 = 2*A @ mu + d\n k2 = mu.T @ A @ mu + mu.T @ d + k\n\n # expectation value E[P(r)]_{mu=0} = tr(A.cov) + muT.A.mu + muT.d + k|_{mu=0}\n # = tr(A.cov) + k\n mean = np.trace(A @ cov) + k2\n # variance Var[P(r)]_{mu=0} = 2tr(A.cov.A.cov) + 4*muT.A.cov.A.mu + dT.cov.d|_{mu=0}\n # = 2tr(A.cov.A.cov) + dT.cov.d\n var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2\n\n # Correction term to account for incorrect symmetric ordering in the variance.\n # This occurs because Var[S(P(r))] = Var[P(r)] - Σ_{m1, m2} |hbar*A_{(m1, m1+N),(m2, m2+N)}|,\n # where m1, m2 are all 
possible mode numbers, and N is the total number of modes.\n # Therefore, the correction term is the sum of the determinants of 2x2 submatrices of A.\n modes = np.arange(2*num_modes).reshape(2, -1).T\n var -= np.sum([np.linalg.det(self._hbar*quad_coeffs[:, m][n]) for m in modes for n in modes])\n\n return mean, var\n\n @abc.abstractmethod\n def reduced_dm(self, modes, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fock_prob(self, n, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def mean_photon(self, mode, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity(self, other_state, mode, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_vacuum(self, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_coherent(self, alpha_list, **kwargs):\n raise NotImplementedError\n" ]
[ [ "numpy.sum", "scipy.special.factorial", "numpy.trace", "numpy.meshgrid", "numpy.allclose", "numpy.transpose", "numpy.arccosh", "numpy.reshape", "numpy.abs", "numpy.cos", "numpy.tensordot", "numpy.identity", "numpy.eye", "numpy.zeros", "numpy.linalg.det", "numpy.arange", "scipy.linalg.block_diag", "numpy.einsum", "numpy.array", "scipy.stats.multivariate_normal", "numpy.empty", "numpy.conj", "numpy.shape", "numpy.sqrt", "numpy.sin", "numpy.dot", "numpy.vdot", "numpy.real" ] ]
KyleKing/dash_charts
[ "8e3644505047fa85f3175f5bc55a2421cb0a19ea" ]
[ "tests/examples/ex_rolling_chart.py" ]
[ "\"\"\"Example Rolling Mean and Filled Standard Deviation Chart.\"\"\"\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom implements import implements\n\nfrom dash_charts.scatter_line_charts import RollingChart\nfrom dash_charts.utils_app import AppBase, AppInterface\nfrom dash_charts.utils_callbacks import map_args, map_outputs\nfrom dash_charts.utils_fig import make_dict_an, min_graph\nfrom dash_charts.utils_helpers import parse_dash_cli_args\n\n\n@implements(AppInterface) # noqa: H601\nclass RollingDemo(AppBase):\n \"\"\"Example creating a rolling mean chart.\"\"\"\n\n name = 'Example Rolling Chart'\n \"\"\"Application name\"\"\"\n\n data_raw = None\n \"\"\"All in-memory data referenced by callbacks and plotted. If modified, will impact all viewers.\"\"\"\n\n chart_main = None\n \"\"\"Main chart (Rolling).\"\"\"\n\n id_slider = 'slider'\n \"\"\"Slider ID.\"\"\"\n\n id_chart = 'rolling'\n \"\"\"Unique name for the main chart.\"\"\"\n\n def initialization(self) -> None:\n \"\"\"Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.\"\"\"\n super().initialization()\n self.register_uniq_ids([self.id_slider, self.id_chart])\n\n def generate_data(self) -> None:\n \"\"\"Create self.data_raw with sample data.\"\"\"\n # Generate random data points\n count = 1000\n mu, sigma = (15, 10) # mean and standard deviation\n samples = np.random.normal(mu, sigma, count)\n # Add a break at the mid-point\n mid_count = count / 2\n y_vals = [samples[_i] + (-1 if _i > mid_count else 1) * _i / 10.0 for _i in range(count)]\n\n # Combine into a dataframe\n self.data_raw = pd.DataFrame(\n data={\n 'x': range(count),\n 'y': y_vals,\n 'label': [f'Point {idx}' for idx in range(count)],\n },\n )\n\n def create_elements(self) -> None:\n \"\"\"Initialize the charts, tables, and other Dash elements.\"\"\"\n self.chart_main = RollingChart(\n title='Sample Timeseries Chart with Rolling Calculations',\n xlabel='Index',\n ylabel='Measured Value',\n )\n # Add some example annotations\n colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#e377c2', '#7f7f7f', '#17becf', None]\n count = 1000\n y_offset = np.mean(self.data_raw['y']) - np.amin(self.data_raw['y'])\n for idx, color in enumerate(colors):\n label = f'Additional Information for index {idx + 1} and color {color}'\n coord = [self.data_raw[ax][20 + int(idx * count / len(colors))] for ax in ['x', 'y']]\n self.chart_main.annotations.append(\n go.layout.Annotation(\n **make_dict_an(coord, str(idx + 1), label, color, y_offset),\n ),\n )\n\n def return_layout(self) -> dict:\n \"\"\"Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n \"\"\"\n step = 50\n slider_max = 1000\n return html.Div(\n style={\n 'maxWidth': '1000px',\n 'marginRight': 'auto',\n 'marginLeft': 'auto',\n }, children=[\n html.H4(children=self.name),\n min_graph(id=self._il[self.id_chart], figure=self.chart_main.create_figure(self.data_raw)),\n dcc.RangeSlider(\n id=self._il[self.id_slider], min=0, max=slider_max, step=step / 5, value=[150, 825],\n marks={str(idx * step): str(idx * step) for idx in range(int(slider_max / step))},\n ),\n ],\n )\n\n def create_callbacks(self) -> None:\n \"\"\"Create Dash callbacks.\"\"\"\n outputs = [(self.id_chart, 'figure')]\n inputs = [(self.id_slider, 'value')]\n states = []\n\n @self.callback(outputs, inputs, states, pic=True)\n def update_chart(*raw_args):\n a_in, a_states = map_args(raw_args, inputs, 
states)\n slider = a_in[self.id_slider]['value']\n df_filtered = self.data_raw[(self.data_raw['x'] >= slider[0]) & (self.data_raw['x'] <= slider[1])]\n self.chart_main.axis_range = {'x': slider}\n new_figure = self.chart_main.create_figure(df_raw=df_filtered)\n\n # See: https://plot.ly/python/range-slider/\n new_figure['layout']['xaxis']['rangeslider'] = {'visible': True}\n return map_outputs(outputs, [(self.id_chart, 'figure', new_figure)])\n\n\ninstance = RollingDemo\napp = instance()\napp.create()\nif __name__ == '__main__':\n app.run(**parse_dash_cli_args())\nelse:\n FLASK_HANDLE = app.get_server()\n" ]
[ [ "numpy.random.normal", "numpy.amin", "numpy.mean" ] ]
hluedemann/inplace_abn
[ "5210ec0b38ba7c6c9deb08927aa18806ac3380f3" ]
[ "scripts/models/densenet.py" ]
[ "import sys\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport torch.nn as nn\n\nfrom inplace_abn import ABN\nfrom modules import GlobalAvgPool2d, DenseModule\nfrom .util import try_index\n\n\nclass DenseNet(nn.Module):\n def __init__(self,\n structure,\n norm_act=ABN,\n input_3x3=False,\n growth=32,\n theta=0.5,\n classes=0,\n dilation=1):\n \"\"\"DenseNet\n\n Parameters\n ----------\n structure : list of int\n Number of layers in each of the four dense blocks of the network.\n norm_act : callable\n Function to create normalization / activation Module.\n input_3x3 : bool\n If `True` use three `3x3` convolutions in the input module instead of a single `7x7` one.\n growth : int\n Number of channels in each layer, i.e. the \"growth\" factor of the DenseNet.\n theta : float\n Reduction factor for the transition blocks.\n classes : int\n If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end\n of the network.\n dilation : int or list of int\n List of dilation factors, or `1` to ignore dilation. If the dilation factor for a module is greater than `1`\n skip the pooling in the transition block right before it.\n \"\"\"\n super(DenseNet, self).__init__()\n self.structure = structure\n if len(structure) != 4:\n raise ValueError(\"Expected a structure with four values\")\n\n # Initial layers\n if input_3x3:\n layers = [\n (\"conv1\", nn.Conv2d(3, growth * 2, 3, stride=2, padding=1, bias=False)),\n (\"bn1\", norm_act(growth * 2)),\n (\"conv2\", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),\n (\"bn2\", norm_act(growth * 2)),\n (\"conv3\", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),\n (\"pool\", nn.MaxPool2d(3, stride=2, padding=1))\n ]\n else:\n layers = [\n (\"conv1\", nn.Conv2d(3, growth * 2, 7, stride=2, padding=3, bias=False)),\n (\"pool\", nn.MaxPool2d(3, stride=2, padding=1))\n ]\n self.mod1 = nn.Sequential(OrderedDict(layers))\n\n in_channels = growth * 2\n for mod_id in range(4):\n d = try_index(dilation, mod_id)\n s = 2 if d == 1 and mod_id > 0 else 1\n\n # Create transition module\n if mod_id > 0:\n out_channels = int(in_channels * theta)\n layers = [\n (\"bn\", norm_act(in_channels)),\n (\"conv\", nn.Conv2d(in_channels, out_channels, 1, bias=False))\n ]\n if s == 2:\n layers.append((\"pool\", nn.AvgPool2d(2, 2)))\n self.add_module(\"tra%d\" % (mod_id + 1), nn.Sequential(OrderedDict(layers)))\n in_channels = out_channels\n\n # Create dense module\n mod = DenseModule(in_channels, growth, structure[mod_id], norm_act=norm_act, dilation=d)\n self.add_module(\"mod%d\" % (mod_id + 2), mod)\n in_channels = mod.out_channels\n\n # Pooling and predictor\n self.bn_out = norm_act(in_channels)\n if classes != 0:\n self.classifier = nn.Sequential(OrderedDict([\n (\"avg_pool\", GlobalAvgPool2d()),\n (\"fc\", nn.Linear(in_channels, classes))\n ]))\n\n def forward(self, x):\n x = self.mod1(x)\n x = self.mod2(x)\n x = self.tra2(x)\n x = self.mod3(x)\n x = self.tra3(x)\n x = self.mod4(x)\n x = self.tra4(x)\n x = self.mod5(x)\n x = self.bn_out(x)\n\n if hasattr(self, \"classifier\"):\n x = self.classifier(x)\n return x\n\n\n_NETS = {\n \"121\": {\"structure\": [6, 12, 24, 16]},\n \"169\": {\"structure\": [6, 12, 32, 32]},\n \"201\": {\"structure\": [6, 12, 48, 32]},\n \"264\": {\"structure\": [6, 12, 64, 48]},\n}\n\n__all__ = []\nfor name, params in _NETS.items():\n net_name = \"net_densenet\" + name\n setattr(sys.modules[__name__], net_name, partial(DenseNet, **params))\n 
__all__.append(net_name)\n" ]
[ [ "torch.nn.Linear", "torch.nn.MaxPool2d", "torch.nn.Conv2d", "torch.nn.AvgPool2d" ] ]
lipinoelbreve/topicos-streamlit
[ "849d34ee4c8f1dbd700c50a2069e87dbb6c93663" ]
[ "data-collection/main.py" ]
[ "# %%\n# Recorre las 1000 paginas de articulos que podemos ver\n# De cada artículo guarda:\n# Url\n# Id de Pubmed\n# Título\n# Keywords\n# Lista de autores con nombres y afiliaciones y país\n\n# El código no para hasta que lo frenes o que llegue a la página 1.000, pero cada vez que carga un artículo lo guarda, así que se puede\n# frenar en cualquier momento\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport numpy as np\nfrom tqdm import tqdm\nfrom time import sleep\nimport os\nfrom Article import ArticleCollection\n\n#%%\nbase_url = 'https://pubmed.ncbi.nlm.nih.gov'\nfilter_url = '/?term=(((%222016%22%5BDate%20-%20Publication%5D%20%3A%20%223000%22%5BDate%20-%20Publication%5D))%20AND%20(%22english%22%5BLanguage%5D))%20AND%20(%22journal%20article%22%5BPublication%20Type%5D)'\n\nyears_to_process = [2016,2017,2018,2019,2020,2021]\npages_per_year = 500\npause_duration = 1 # segundos entre requests\n\n#%%\n# Cargo lo que se avanzo hasta ahora\ncurrent = os.getcwd()\nfilename = current + '/articles.pkl'\narticle_collection = ArticleCollection()\narticle_collection.load_years(years_to_process)\n\nremaining_pages = np.arange(1,pages_per_year+1)\nif os.path.exists(filename):\n article_collection.load('articles.pkl')\n processed_pages = article_collection.processed_pages[ article_collection.current_year ]\n \n if len(processed_pages) > 0:\n remaining_pages = np.arange(processed_pages[-1], pages_per_year+1)\n\n years_to_process = np.arange(article_collection.current_year, 2022)\n\n#%%\nprint('Descargando articulos...')\nprint('Ctrl + C para frenar (todo el proceso es guardado)')\n\nfor year in years_to_process:\n print('Processing year', year)\n article_collection.current_year = year\n for page in tqdm( remaining_pages ):\n url = base_url + filter_url + '&filter=years.' + str(year) + '-' + str(year) + '&page=' + str(page)\n r = requests.get(url)\n souped = BeautifulSoup(r.content.decode(\"utf-8\"), features=\"html.parser\")\n\n articles_in_page = souped.find_all('a', attrs={'class': 'docsum-title'})\n articles_ids = [ int(re.sub('[^\\d]', '', article['href'])) for article in articles_in_page ]\n \n for article_id in articles_ids:\n if article_id not in article_collection.articles.keys():\n article_link = base_url + '/' + str(article_id)\n res = article_collection.get_article_data( article_link )\n article_collection.save('articles.pkl')\n print(res, article_id)\n sleep(pause_duration)\n\n if page not in article_collection.processed_pages:\n article_collection.processed_pages[article_collection.current_year].append(page)\n print('Processed page', page, '-', article_collection.current_year)\n \n remaining_pages = np.arange(1, pages_per_year+1)" ]
[ [ "numpy.arange" ] ]
VisCog/ArgusShapes
[ "ba361e28b8d30097c41314bbfe68341cc8ac0c01" ]
[ "argus_shapes/tests/test_argus_shapes.py" ]
[ "from __future__ import absolute_import, division, print_function\nimport os\nimport numpy as np\nimport pandas as pd\nimport shutil\nimport requests\n\nimport numpy.testing as npt\nimport pytest\n\nimport skimage.io as skio\n\nfrom .. import argus_shapes as shapes\nimport pulse2percept.implants as p2pi\n\ntry:\n FileNotFoundError\nexcept NameError:\n # Python 2\n FileNotFoundError = IOError\n\n\ndef generate_dummy_data():\n X = pd.DataFrame()\n X['subject'] = pd.Series(['S1', 'S1', 'S2', 'S2', 'S3', 'S3'])\n X['feature1'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\n X['feature2'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\n y = pd.DataFrame()\n y['subject'] = pd.Series(['S1', 'S1', 'S2', 'S2', 'S3', 'S3'],\n index=X.index)\n y['target'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6],\n index=X.index)\n y['image'] = pd.Series([np.random.rand(10, 10)] * 6)\n y['area'] = pd.Series([1, 2, 3, 4, 5, 6])\n return X, y\n\n\ndef test_download_file():\n fname = \"test.zip\"\n with pytest.raises(requests.exceptions.HTTPError):\n shapes.download_file(\"https://github.com/VisCog/blah\", fname)\n shapes.download_file(\"https://osf.io/rduj4\", fname)\n os.remove(fname)\n\n\ndef test_fetch_data():\n test_dir = \"test\"\n with pytest.raises(ValueError):\n shapes.fetch_data()\n shapes.fetch_data(save_path=test_dir)\n npt.assert_equal(\n os.path.exists(os.path.join(test_dir, 'argus_shapes.zip')),\n True\n )\n npt.assert_equal(os.path.isdir(os.path.join(test_dir, 'argus_shapes')),\n True)\n npt.assert_equal(\n os.path.exists(os.path.join(test_dir, 'argus_shapes',\n 'drawings_single.csv')),\n True\n )\n npt.assert_equal(\n os.path.exists(os.path.join(test_dir, 'argus_shapes', 'subjects.csv')),\n True\n )\n shutil.rmtree(test_dir)\n\n\ndef test_load_data():\n with pytest.raises(FileNotFoundError):\n shapes.load_data(\"doesforsurenotexist.csv\", auto_fetch=False)\n\n csvfile = \"data.csv\"\n csvfile2 = \"data2.csv\"\n imgfile = \"test_image.png\"\n skio.imsave(imgfile, np.random.randint(256, size=(10, 10)))\n\n subjects = ['S1', 'S2']\n electrodes = ['A1', 'F9']\n amps = [2.0, 3.0]\n for use_fullpath in [True, False]:\n data = []\n for subject in subjects:\n for electrode in electrodes:\n for amp in amps:\n if use_fullpath:\n fname = os.path.join(os.getcwd(), imgfile)\n else:\n fname = imgfile\n row = {\n 'subject_id': subject,\n 'PTS_ELECTRODE': electrode,\n 'PTS_FILE': fname,\n 'PTS_AMP': amp,\n 'PTS_FREQ': 20.0,\n 'PTS_PULSE_DUR': 0.45,\n 'stim_class': 'SingleElectrode',\n 'date': '1985/09/30'\n }\n data.append(row)\n pd.DataFrame(data).to_csv(csvfile, index=False)\n X = shapes.load_data(csvfile)\n npt.assert_equal(np.sort(X.subject.unique()), subjects)\n npt.assert_equal(np.sort(X.electrode.unique()), electrodes)\n npt.assert_equal(len(X), len(subjects) * len(electrodes) * len(amps))\n\n with pytest.raises(ValueError):\n XX = X.copy()\n XX['PTS_ELECTRODE1'] = XX['electrode']\n XX['PTS_ELECTRODE2'] = XX['electrode']\n XX.drop(columns='electrode', inplace=True)\n XX.to_csv(csvfile2, index=False)\n X = shapes.load_data(csvfile2)\n\n for subject in subjects + ['nobody', 'S10']:\n X = shapes.load_data(csvfile, subject=subject)\n if subject in subjects:\n npt.assert_equal(np.sort(X.subject.unique()), subject)\n npt.assert_equal(np.sort(X.electrode.unique()), electrodes)\n npt.assert_equal(np.sort(X.amp.unique()), amps)\n else:\n npt.assert_equal(len(X), 0)\n npt.assert_equal(len(X.columns), 0)\n\n for electrode in electrodes + ['F10']:\n X = shapes.load_data(csvfile, electrodes=[electrode])\n if electrode in 
electrodes:\n npt.assert_equal(np.sort(X.subject.unique()), subjects)\n npt.assert_equal(np.sort(X.electrode.unique()), electrode)\n npt.assert_equal(np.sort(X.amp.unique()), amps)\n else:\n npt.assert_equal(len(X), 0)\n npt.assert_equal(len(X.columns), 0)\n\n for amp in amps + [1.5]:\n X = shapes.load_data(csvfile, amp=amp)\n if np.any([np.isclose(a, amp) for a in amps]):\n npt.assert_equal(np.sort(X.subject.unique()), subjects)\n npt.assert_equal(np.sort(X.electrode.unique()), electrodes)\n npt.assert_equal(np.sort(X.amp.unique()), amp)\n else:\n npt.assert_equal(len(X), 0)\n npt.assert_equal(len(X.columns), 0)\n\n with pytest.raises(ValueError):\n shapes.load_data(csvfile, electrodes='A1')\n\n os.remove(csvfile)\n os.remove(csvfile2)\n os.remove(imgfile)\n\n\ndef test_load_subjects():\n with pytest.raises(FileNotFoundError):\n shapes.load_subjects(\"forsuredoesntexist.csv\", auto_fetch=False)\n\n csvfile = \"data.csv\"\n data = [\n {'subject_id': 'S1', 'implant_type_str': 'ArgusI',\n 'implant_x': 10, 'implant_y': 20, 'implant_rot': 0.5,\n 'xmin': -30, 'xmax': 30, 'ymin': -20, 'ymax': 20,\n 'loc_od_x': 15, 'loc_od_y': 2},\n {'subject_id': 'S2', 'implant_type_str': 'ArgusII',\n 'implant_x': 20, 'implant_y': 40, 'implant_rot': 1.0,\n 'xmin': -60, 'xmax': 60, 'ymin': -30, 'ymax': 30,\n 'loc_od_x': 19, 'loc_od_y': 4},\n ]\n pd.DataFrame(data).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n npt.assert_equal(np.sort(X.index.unique()), ['S1', 'S2'])\n print(X.columns)\n npt.assert_equal(X.loc['S1', 'implant_type'], p2pi.ArgusI)\n npt.assert_equal(X.loc['S2', 'implant_type'], p2pi.ArgusII)\n # etc.\n\n with pytest.raises(ValueError):\n # Missing 'subject_id' index:\n pd.DataFrame([{'subject': 'S1'}]).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n\n with pytest.raises(ValueError):\n # Other missing columns:\n pd.DataFrame([{'subject_id': 'S1'}]).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n with pytest.raises(ValueError):\n # Wrong implant type:\n data[0]['implant_type_str'] = 'ArgusIII'\n pd.DataFrame(data).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n os.remove(csvfile)\n\n\ndef test_is_singlestim_dataframe():\n with pytest.raises(ValueError):\n shapes.is_singlestim_dataframe(pd.DataFrame())\n\n df = pd.DataFrame([\n {'PTS_ELECTRODE': 'A01'},\n {'PTS_ELECTRODE': 'A02'}\n ])\n npt.assert_equal(shapes.is_singlestim_dataframe(df), True)\n\n df = pd.DataFrame([\n {'PTS_ELECTRODE1': 'A01', 'PTS_ELECTRODE2': 'A03'},\n {'PTS_ELECTRODE1': 'A02', 'PTS_ELECTRODE2': 'A04'}\n ])\n npt.assert_equal(shapes.is_singlestim_dataframe(df), False)\n\n\ndef test_calc_mean_images():\n with pytest.raises(ValueError):\n # empty list not allowed\n shapes.calc_mean_images(pd.DataFrame([]), groupby=[])\n with pytest.raises(ValueError):\n # groupby columns not present:\n shapes.calc_mean_images(pd.DataFrame([]))\n with pytest.raises(ValueError):\n # 'image' not in columns:\n shapes.calc_mean_images(pd.DataFrame([{'subject': 'S1'}]),\n groupby=['subject'])\n\n X, y = generate_dummy_data()\n Xy = pd.concat((X, y.drop(columns='subject')), axis=1)\n shapes.calc_mean_images(Xy, groupby=['subject'])\n" ]
[ [ "pandas.Series", "numpy.testing.assert_equal", "pandas.DataFrame", "numpy.isclose", "numpy.random.rand", "numpy.random.randint" ] ]
klimpie94/Python-training
[ "7af210126cfe2e9386a8f22075ea0d7eff80daac" ]
[ "Day2/pandas-exercises-python/python-exercises-02-questions/utils/transformation_functions.py" ]
[ "\nimport pandas as pd\n\n\ndef read_csv_files(file_path):\n return pd.read_csv(file_path)\n\n\ndef filter_films(dataframe):\n pass\n\n\ndef join_categories_with_metadata(facts_df, categories_df):\n # Hint: You can use lambda functions to change the id column in order to\n # use join method in pandas.\n pass\n\n\ndef categories_into_one_single_column(categories_df):\n # Hint: When you use dataframe.idxmax(axis=1) you automatically\n # create a pd.Series with categorical values as strings.\n pass\n\n\ndef take_year_from_movie_title_string(movie_title_str):\n try:\n pass\n except (IndexError, ValueError):\n return 9999\n\n\ndef genre_count_table_for_movies_with_aggregation(categories_df):\n pass\n\n\ndef calculate_ratio_of_nomination_over_win(dataframe):\n # Hint 1: Use an additional function for handling\n # zero division error.\n # Hint 2: Nominations + Wins = Total Number of Nominations\n\n pass\n" ]
[ [ "pandas.read_csv" ] ]
vanderschaarlab/MIRACLE
[ "ec28f5051d604a3134f9379b9a63a6cc379f2bc5" ]
[ "miracle/third_party/imputation_gain.py" ]
[ "# stdlib\nfrom typing import Tuple, Union\n\n# third party\nimport numpy as np\nfrom sklearn.base import TransformerMixin\n\n# Necessary packages\nimport torch\nfrom torch import nn\n\nEPS = 1e-8\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef sample_Z(m: int, n: int) -> np.ndarray:\n \"\"\"Random sample generator for Z.\n\n Args:\n m: number of rows\n n: number of columns\n\n Returns:\n np.ndarray: generated random values\n \"\"\"\n res = np.random.uniform(0.0, 0.01, size=[m, n])\n return torch.from_numpy(res).to(DEVICE)\n\n\ndef sample_M(m: int, n: int, p: float) -> np.ndarray:\n \"\"\"Hint Vector Generation\n\n Args:\n m: number of rows\n n: number of columns\n p: hint rate\n\n Returns:\n np.ndarray: generated random values\n \"\"\"\n unif_prob = np.random.uniform(0.0, 1.0, size=[m, n])\n M = unif_prob > p\n M = 1.0 * M\n\n return torch.from_numpy(M).to(DEVICE)\n\n\ndef sample_idx(m: int, n: int) -> np.ndarray:\n \"\"\"Mini-batch generation\n\n Args:\n m: number of rows\n n: number of columns\n\n Returns:\n np.ndarray: generated random indices\n \"\"\"\n idx = np.random.permutation(m)\n idx = idx[:n]\n return idx\n\n\nclass GainModel:\n \"\"\"The core model for GAIN Imputation.\n\n Args:\n dim: float\n Number of features.\n h_dim: float\n Size of the hidden layer.\n loss_alpha: int\n Hyperparameter for the generator loss.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n h_dim: int,\n loss_alpha: float = 10,\n ) -> None:\n self.generator_layer = nn.Sequential(\n nn.Linear(dim * 2, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, dim),\n nn.Sigmoid(),\n ).to(DEVICE)\n self.discriminator_layer = nn.Sequential(\n nn.Linear(dim * 2, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, dim),\n nn.Sigmoid(),\n ).to(DEVICE)\n self.loss_alpha = loss_alpha\n\n def discriminator(self, X: torch.Tensor, hints: torch.Tensor) -> torch.Tensor:\n inputs = torch.cat([X, hints], dim=1).float()\n return self.discriminator_layer(inputs)\n\n def generator(self, X: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n inputs = torch.cat([X, mask], dim=1).float()\n return self.generator_layer(inputs)\n\n def discr_loss(\n self, X: torch.Tensor, M: torch.Tensor, H: torch.Tensor\n ) -> torch.Tensor:\n G_sample = self.generator(X, M)\n X_hat = X * M + G_sample * (1 - M)\n D_prob = self.discriminator(X_hat, H)\n return -torch.mean(\n M * torch.log(D_prob + EPS) + (1 - M) * torch.log(1.0 - D_prob + EPS)\n )\n\n def gen_loss(\n self, X: torch.Tensor, M: torch.Tensor, H: torch.Tensor\n ) -> torch.Tensor:\n G_sample = self.generator(X, M)\n X_hat = X * M + G_sample * (1 - M)\n D_prob = self.discriminator(X_hat, H)\n\n G_loss1 = -torch.mean((1 - M) * torch.log(D_prob + EPS))\n MSE_train_loss = torch.mean((M * X - M * G_sample) ** 2) / torch.mean(M)\n\n return G_loss1 + self.loss_alpha * MSE_train_loss\n\n\nclass GainImputation(TransformerMixin):\n \"\"\"GAIN Imputation for static data using Generative Adversarial Nets.\n The training steps are:\n - The generato imputes the missing components conditioned on what is actually observed, and outputs a completed vector.\n - The discriminator takes a completed vector and attempts to determine which components were actually observed and which were imputed.\n\n Original Paper: J. Yoon, J. Jordon, M. 
van der Schaar, \"GAIN: Missing Data Imputation using Generative Adversarial Nets,\" ICML, 2018.\n\n\n Args:\n batch_size: int\n The batch size for the training steps.\n iterations: int\n Number of epochs for training.\n hint_rate: float\n Percentage of additional information for the discriminator.\n loss_alpha: int\n Hyperparameter for the generator loss.\n \"\"\"\n\n def __init__(\n self,\n batch_size: int = 128,\n iterations: int = 10000,\n hint_rate: float = 0.9,\n loss_alpha: float = 10,\n ) -> None:\n self.batch_size = batch_size\n self.iterations = iterations\n self.hint_rate = hint_rate\n self.loss_alpha = loss_alpha\n self.norm_parameters: Union[dict, None] = None\n self.model: Union[GainModel, None] = None\n\n def fit(self, X: torch.Tensor) -> \"GainImputation\":\n \"\"\"Train the GAIN model.\n\n Args:\n X: incomplete dataset.\n\n Returns:\n self: the updated model.\n \"\"\"\n X = X.clone()\n\n # Parameters\n no = len(X)\n dim = len(X[0, :])\n\n # Hidden state dimensions\n h_dim = dim\n\n # MinMaxScaler normalization\n min_val = np.zeros(dim)\n max_val = np.zeros(dim)\n\n X = X.cpu()\n\n for i in range(dim):\n min_val[i] = np.nanmin(X[:, i])\n X[:, i] = X[:, i] - np.nanmin(X[:, i])\n max_val[i] = np.nanmax(X[:, i])\n X[:, i] = X[:, i] / (np.nanmax(X[:, i]) + EPS)\n\n # Set missing\n mask = 1 - (1 * (np.isnan(X)))\n mask = mask.float().to(DEVICE)\n\n X = torch.nan_to_num(X)\n X = X.to(DEVICE)\n\n self.model = GainModel(dim, h_dim)\n\n D_solver = torch.optim.Adam(self.model.discriminator_layer.parameters())\n G_solver = torch.optim.Adam(self.model.generator_layer.parameters())\n\n def sample() -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n mb_size = min(self.batch_size, no)\n\n mb_idx = sample_idx(no, mb_size)\n x_mb = X[mb_idx, :].clone()\n m_mb = mask[mb_idx, :].clone()\n\n z_mb = sample_Z(mb_size, dim)\n h_mb = sample_M(mb_size, dim, 1 - self.hint_rate)\n h_mb = m_mb * h_mb\n\n x_mb = m_mb * x_mb + (1 - m_mb) * z_mb\n\n return x_mb, h_mb, m_mb\n\n for it in range(self.iterations):\n D_solver.zero_grad()\n\n x_mb, h_mb, m_mb = sample()\n\n D_loss = self.model.discr_loss(x_mb, m_mb, h_mb)\n D_loss.backward()\n D_solver.step()\n\n G_solver.zero_grad()\n x_mb, h_mb, m_mb = sample()\n G_loss = self.model.gen_loss(x_mb, m_mb, h_mb)\n G_loss.backward()\n G_solver.step()\n\n self.norm_parameters = {\"min\": min_val, \"max\": max_val}\n\n return self\n\n def transform(self, Xmiss: np.ndarray) -> np.ndarray:\n \"\"\"Return imputed data by trained GAIN model.\n\n Args:\n Xmiss: the array with missing data\n\n Returns:\n torch.Tensor: the array without missing data\n\n Raises:\n RuntimeError: if the result contains np.nans.\n \"\"\"\n Xmiss = torch.tensor(np.asarray(Xmiss)).to(DEVICE)\n if self.norm_parameters is None or self.model is None:\n raise RuntimeError(\"fit the model first\")\n\n X = Xmiss.clone()\n\n min_val = self.norm_parameters[\"min\"]\n max_val = self.norm_parameters[\"max\"]\n\n no, dim = X.shape\n\n X = X.cpu()\n # MinMaxScaler normalization\n for i in range(dim):\n X[:, i] = X[:, i] - min_val[i]\n X[:, i] = X[:, i] / (max_val[i] + EPS)\n\n # Set missing\n mask = 1 - (1 * (np.isnan(X)))\n mask = mask.float().to(DEVICE)\n\n x = torch.nan_to_num(X)\n x = x.to(DEVICE)\n\n # Imputed data\n z = sample_Z(no, dim)\n x = mask * x + (1 - mask) * z\n\n imputed_data = self.model.generator(x, mask)\n\n # Renormalize\n for i in range(dim):\n imputed_data[:, i] = imputed_data[:, i] * (max_val[i] + EPS)\n imputed_data[:, i] = imputed_data[:, i] + min_val[i]\n\n if 
np.all(np.isnan(imputed_data.detach().cpu().numpy())):\n err = \"The imputed result contains nan. This is a bug. Please report it on the issue tracker.\"\n raise RuntimeError(err)\n\n mask = mask.cpu()\n imputed_data = imputed_data.detach().cpu()\n\n return mask * np.nan_to_num(Xmiss.cpu()) + (1 - mask) * imputed_data\n\n def fit_transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"Imputes the provided dataset using the GAIN strategy.\n\n Args:\n X: np.ndarray\n A dataset with missing values.\n\n Returns:\n Xhat: The imputed dataset.\n \"\"\"\n X = torch.tensor(np.asarray(X)).cpu()\n return self.fit(X).transform(X).detach().cpu().numpy()\n" ]
[ [ "numpy.random.uniform", "torch.nn.Linear", "numpy.nanmax", "numpy.zeros", "numpy.random.permutation", "numpy.asarray", "numpy.nanmin", "torch.nn.ReLU", "torch.cuda.is_available", "torch.from_numpy", "torch.log", "numpy.isnan", "torch.nn.Sigmoid", "torch.cat", "torch.mean", "torch.nan_to_num" ] ]
zpreisler/tensorflow
[ "f2b17b22e12bd743b66945070f338f70b5fa3332" ]
[ "tensorflow/contrib/distribute/python/metrics_v1_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for V1 metrics.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.contrib.distribute.python import combinations\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics\nfrom tensorflow.python.ops import variables\n\n\ndef _labeled_dataset_fn():\n # First four batches of x: labels, predictions -> (labels == predictions)\n # 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False\n # 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False\n # 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False\n # 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True\n return dataset_ops.Dataset.range(1000).map(\n lambda x: {\"labels\": x % 5, \"predictions\": x % 3}).batch(4)\n\n\ndef _boolean_dataset_fn():\n # First four batches of labels, predictions: {TP, FP, TN, FN}\n # with a threshold of 0.5:\n # T, T -> TP; F, T -> FP; T, F -> FN\n # F, F -> TN; T, T -> TP; F, T -> FP\n # T, F -> FN; F, F -> TN; T, T -> TP\n # F, T -> FP; T, F -> FN; F, F -> TN\n return dataset_ops.Dataset.from_tensor_slices({\n \"labels\": [True, False, True, False],\n \"predictions\": [True, True, False, False]}).repeat().batch(3)\n\n\ndef _threshold_dataset_fn():\n # First four batches of labels, predictions: {TP, FP, TN, FN}\n # with a threshold of 0.5:\n # True, 1.0 -> TP; False, .75 -> FP; True, .25 -> FN\n # False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP\n # True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP\n # False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN\n return dataset_ops.Dataset.from_tensor_slices({\n \"labels\": [True, False, True, False],\n \"predictions\": [1.0, 0.75, 0.25, 0.]}).repeat().batch(3)\n\n\ndef _regression_dataset_fn():\n return dataset_ops.Dataset.from_tensor_slices({\n \"labels\": [1., .5, 1., 0.],\n \"predictions\": [1., .75, .25, 0.]}).repeat()\n\n\n# TODO(priyag): Add TPU Strategy to this once metrics aggregate correctly using\n# TowerLocalVariables on TPUs. 
Submit http://cl/208914352.\ndef all_combinations():\n return combinations.combine(\n distribution=[combinations.default_strategy,\n combinations.one_device_strategy,\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.mirrored_strategy_with_two_gpus],\n mode=[\"graph\"])\n\n\n# TODO(josh11b): Test metrics.recall_at_top_k, metrics.average_precision_at_k,\n# metrics.precision_at_k\nclass MetricsV1Test(test.TestCase, parameterized.TestCase):\n\n def _test_metric(self, distribution, dataset_fn, metric_fn, expected_fn):\n with ops.Graph().as_default(), distribution.scope():\n iterator = distribution.distribute_dataset(\n dataset_fn).make_initializable_iterator()\n value, update = distribution.call_for_each_tower(\n metric_fn, iterator.get_next())\n update = distribution.group(update)\n self.evaluate(iterator.initializer)\n self.evaluate(variables.local_variables_initializer())\n # TODO(josh11b): Once we switch to using a global batch size for input,\n # replace \"distribution.num_towers\" with \"1\".\n batches_per_update = distribution.num_towers\n\n # Update variables using the first `num_towers` batches.\n self.evaluate(update)\n self.assertAllClose(expected_fn(batches_per_update), self.evaluate(value),\n 0.001, msg=\"After first update\")\n\n # Update variables using the second `num_towers` batches.\n self.evaluate(update)\n self.assertAllClose(expected_fn(2 * batches_per_update),\n self.evaluate(value),\n 0.001,\n msg=\"After second update\")\n\n if batches_per_update == 1: # Consume 4 input batches\n self.evaluate(update)\n self.assertAllClose(expected_fn(3 * batches_per_update),\n self.evaluate(value),\n 0.001,\n msg=\"After third update\")\n self.evaluate(update)\n self.assertAllClose(expected_fn(4 * batches_per_update),\n self.evaluate(value),\n 0.001,\n msg=\"After fourth update\")\n\n @combinations.generate(all_combinations())\n def testMean(self, distribution):\n def _dataset_fn():\n return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(4)\n\n def _expected_fn(num_batches):\n # Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.\n return num_batches * 2 - 0.5\n\n self._test_metric(distribution, _dataset_fn, metrics.mean, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testAccuracy(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.accuracy(labels, predictions)\n\n def _expected_fn(num_batches):\n return [3./4, 3./8, 3./12, 4./16][num_batches - 1]\n\n self._test_metric(\n distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanPerClassAccuracy(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.mean_per_class_accuracy(\n labels, predictions, num_classes=5)\n\n def _expected_fn(num_batches):\n mean = lambda x: sum(x) / len(x)\n return [mean([1., 1., 1., 0., 0.]),\n mean([0.5, 0.5, 0.5, 0., 0.]),\n mean([1./3, 1./3, 0.5, 0., 0.]),\n mean([0.5, 1./3, 1./3, 0., 0.])][num_batches - 1]\n\n self._test_metric(\n distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanIOU(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.mean_iou(\n labels, predictions, num_classes=5)\n\n def _expected_fn(num_batches):\n mean = lambda x: sum(x) / len(x)\n return [mean([1./2, 1./1, 1./1, 0.]), # no class 4 in first batch\n 
mean([1./4, 1./4, 1./3, 0., 0.]),\n mean([1./6, 1./6, 1./5, 0., 0.]),\n mean([2./8, 1./7, 1./7, 0., 0.])][num_batches - 1]\n\n self._test_metric(\n distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanTensor(self, distribution):\n def _dataset_fn():\n dataset = dataset_ops.Dataset.range(1000).map(math_ops.to_float)\n # Want to produce a fixed, known shape, so drop remainder when batching.\n dataset = dataset.batch(4, drop_remainder=True)\n return dataset\n\n def _expected_fn(num_batches):\n # Mean(0, 4, ..., 4 * num_batches - 4) == 2 * num_batches - 2\n # Mean(1, 5, ..., 4 * num_batches - 3) == 2 * num_batches - 1\n # Mean(2, 6, ..., 4 * num_batches - 2) == 2 * num_batches\n # Mean(3, 7, ..., 4 * num_batches - 1) == 2 * num_batches + 1\n first = 2. * num_batches - 2.\n return [first, first + 1., first + 2., first + 3.]\n\n self._test_metric(\n distribution, _dataset_fn, metrics.mean_tensor, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testAUCROC(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.auc(labels, predictions, num_thresholds=8, curve=\"ROC\",\n summation_method=\"careful_interpolation\")\n\n def _expected_fn(num_batches):\n return [0.5, 7./9, 0.8, 0.75][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testAUCPR(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.auc(labels, predictions, num_thresholds=8, curve=\"PR\",\n summation_method=\"careful_interpolation\")\n\n def _expected_fn(num_batches):\n return [0.797267, 0.851238, 0.865411, 0.797267][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalseNegatives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_negatives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [1., 1., 2., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalseNegativesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_negatives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[1.], [1.], [2.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTrueNegatives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_negatives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0., 1., 2., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTrueNegativesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_negatives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[0.], [1.], [2.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, 
_expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalsePositives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_positives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [1., 2., 2., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalsePositivesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_positives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[1.], [2.], [2.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTruePositives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_positives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [1., 2., 3., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTruePositivesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_positives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[1.], [2.], [3.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testPrecision(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.precision(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0.5, 0.5, 0.6, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testPrecisionAtThreshold(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.precision_at_thresholds(labels, predictions, [0.5])\n\n def _expected_fn(num_batches):\n return [[0.5], [0.5], [0.6], [0.5]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testRecall(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.recall(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0.5, 2./3, 0.6, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testRecallAtThreshold(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.recall_at_thresholds(labels, predictions, [0.5])\n\n def _expected_fn(num_batches):\n return [[0.5], [2./3], [0.6], [0.5]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanSquaredError(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.mean_squared_error(labels, predictions)\n\n def _expected_fn(num_batches):\n return 
[0., 1./32, 0.208333, 0.15625][num_batches - 1]\n\n self._test_metric(\n distribution, _regression_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testRootMeanSquaredError(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.root_mean_squared_error(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0., 0.176777, 0.456435, 0.395285][num_batches - 1]\n\n self._test_metric(\n distribution, _regression_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testSensitivityAtSpecificity(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.sensitivity_at_specificity(labels, predictions, 0.8)\n\n def _expected_fn(num_batches):\n return [0.5, 2./3, 0.6, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testSpecificityAtSensitivity(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.specificity_at_sensitivity(labels, predictions, 0.95)\n\n def _expected_fn(num_batches):\n return [0., 1./3, 0.5, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n\nif __name__ == \"__main__\":\n test.main()\n" ]
[ [ "tensorflow.python.ops.metrics.mean_squared_error", "tensorflow.python.ops.metrics.false_positives", "tensorflow.python.data.ops.dataset_ops.Dataset.range", "tensorflow.python.ops.metrics.precision_at_thresholds", "tensorflow.python.ops.metrics.false_negatives_at_thresholds", "tensorflow.python.ops.metrics.false_positives_at_thresholds", "tensorflow.python.ops.metrics.precision", "tensorflow.python.framework.ops.Graph", "tensorflow.python.ops.metrics.sensitivity_at_specificity", "tensorflow.python.ops.variables.local_variables_initializer", "tensorflow.python.ops.metrics.true_negatives", "tensorflow.python.ops.metrics.mean_iou", "tensorflow.python.eager.test.main", "tensorflow.python.ops.metrics.true_positives_at_thresholds", "tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices", "tensorflow.python.ops.metrics.true_negatives_at_thresholds", "tensorflow.contrib.distribute.python.combinations.combine", "tensorflow.python.ops.metrics.recall", "tensorflow.python.ops.metrics.specificity_at_sensitivity", "tensorflow.python.ops.metrics.mean_per_class_accuracy", "tensorflow.python.ops.metrics.accuracy", "tensorflow.python.ops.metrics.false_negatives", "tensorflow.python.ops.metrics.root_mean_squared_error", "tensorflow.python.ops.metrics.recall_at_thresholds", "tensorflow.python.ops.metrics.auc", "tensorflow.python.ops.metrics.true_positives" ] ]
keflavich/sedfitter
[ "ec8722ec423ac684e4930fe23a98cd7b2d5b9f50" ]
[ "sedfitter/models.py" ]
[ "from __future__ import print_function, division\n\nimport os\n\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy import units as u\n\nfrom .convolved_fluxes import ConvolvedFluxes, MonochromaticFluxes\nfrom . import fitting_routines as f\nfrom .utils import parfile\nfrom .utils.validator import validate_array\nfrom .fit_info import FitInfo\nfrom .filter import Filter\nfrom . import six\n\n\nclass Models(object):\n\n def __init__(self):\n\n self.names = None\n self.fluxes = None\n self.distances = None\n self.apertures = None\n self.logd = None\n self.wavelengths = None\n self.distances = None\n self.extended = []\n\n @property\n def wavelengths(self):\n \"\"\"\n The wavelengths at which the models are defined\n \"\"\"\n return self._wavelengths\n\n @wavelengths.setter\n def wavelengths(self, value):\n if value is None:\n self._wavelengths = None\n else:\n self._wavelengths = validate_array('wavelengths', value, domain='positive', ndim=1, physical_type='length')\n\n @property\n def distances(self):\n \"\"\"\n The distances at which the models are defined\n \"\"\"\n return self._distances\n\n @distances.setter\n def distances(self, value):\n if value is None:\n self._distances = None\n else:\n self._distances = validate_array('distances', value, domain='positive', ndim=1, physical_type='length')\n\n @property\n def apertures(self):\n \"\"\"\n The apertures at which the fluxes are defined\n \"\"\"\n return self._apertures\n\n @apertures.setter\n def apertures(self, value):\n if value is None:\n self._apertures = None\n else:\n self._apertures = validate_array('apertures', value, domain='positive', ndim=1, physical_type='length')\n\n @property\n def fluxes(self):\n \"\"\"\n The model fluxes\n \"\"\"\n return self._fluxes\n\n @fluxes.setter\n def fluxes(self, value):\n if value is None:\n self._fluxes = value\n else:\n if self.n_distances is None:\n self._fluxes = validate_array('fluxes', value, ndim=2,\n shape=(self.n_models, self.n_wav),\n physical_type=('power', 'flux', 'spectral flux density'))\n else:\n self._fluxes = validate_array('fluxes', value, ndim=3,\n shape=(self.n_models, self.n_distances, self.n_wav),\n physical_type=('power', 'flux', 'spectral flux density'))\n\n @property\n def n_ap(self):\n if self.apertures is None:\n return 1\n else:\n return len(self.apertures)\n\n @property\n def n_wav(self):\n if self.wavelengths is None:\n return None\n else:\n return len(self.wavelengths)\n\n @property\n def n_distances(self):\n if self.distances is None:\n return None\n else:\n return len(self.distances)\n\n @property\n def n_models(self):\n if self.names is None:\n return None\n else:\n return len(self.names)\n\n @property\n def valid(self):\n if self.fluxes is None:\n return None\n else:\n return self.fluxes != 0\n\n @property\n def log_fluxes_mJy(self):\n values = np.zeros(self.fluxes.shape)\n values[~self.valid] = -np.inf\n values[self.valid] = np.log10(self.fluxes[self.valid].to(u.mJy).value)\n return values\n\n @classmethod\n def read(cls, directory, filters, distance_range=None, remove_resolved=False):\n modpar = parfile.read(\"%s/models.conf\" % directory, 'conf')\n if modpar.get('version', 1) == 1:\n return cls._read_version_1(directory, filters,\n distance_range=distance_range,\n remove_resolved=remove_resolved)\n else:\n return cls._read_version_2(directory, filters,\n distance_range=distance_range,\n remove_resolved=remove_resolved)\n\n @classmethod\n def _read_version_1(cls, directory, filters, distance_range=None, remove_resolved=None):\n\n m = 
cls()\n\n # Read in model parameters\n modpar = parfile.read(\"%s/models.conf\" % directory, 'conf')\n\n print(\" ------------------------------------------------------------\")\n print(\" => Model parameters\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n print(\" Models : %s\" % modpar['name'])\n print(\" Log[d] stepping : %g\" % modpar['logd_step'])\n\n if modpar['aperture_dependent']:\n\n distance_range_kpc = distance_range.to(u.kpc).value\n\n if distance_range:\n if distance_range_kpc[0] == distance_range_kpc[1]:\n n_distances = 1\n m.distances = np.array([distance_range_kpc[0]]) * u.kpc\n else:\n n_distances = 1 + (np.log10(distance_range_kpc[1]) - np.log10(distance_range_kpc[0])) / modpar['logd_step']\n m.distances = np.logspace(np.log10(distance_range_kpc[0]), np.log10(distance_range_kpc[1]), n_distances) * u.kpc\n print(\" Number of distances : %i\" % m.n_distances)\n else:\n raise Exception(\"For aperture-dependent models, a distange range is required\")\n\n print(\"\")\n print(\" ------------------------------------------------------------\")\n print(\" => Reading in convolved fluxes\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n\n m.wavelengths = np.zeros(len(filters)) * u.micron\n\n for ifilt, filt in enumerate(filters):\n\n filename = '%s/convolved/%s.fits' % (directory, filt['name'])\n\n if not os.path.exists(filename):\n if os.path.exists(filename + '.gz'):\n filename += '.gz'\n else:\n raise Exception(\"File not found: \" + filename)\n\n print(\" Reading \" + filename)\n\n conv = ConvolvedFluxes.read(filename)\n\n if ifilt == 0:\n if m.n_distances is None:\n model_fluxes = np.zeros((conv.n_models, len(filters))) * u.mJy\n extended = None\n else:\n model_fluxes = np.zeros((conv.n_models, m.n_distances, len(filters))) * u.mJy\n extended = np.zeros((conv.n_models, m.n_distances, len(filters)), dtype=bool)\n\n m.wavelengths[ifilt] = conv.central_wavelength\n\n if m.n_distances is not None:\n apertures_au = filt['aperture_arcsec'] * m.distances.to(u.pc).value * u.au\n conv = conv.interpolate(apertures_au)\n conv.flux = conv.flux * (u.kpc / m.distances) ** 2\n m.logd = np.log10(m.distances.to(u.kpc).value)\n if remove_resolved:\n extended[:, :, ifilt] = apertures_au[np.newaxis,:] < conv.find_radius_sigma(0.5)[:, np.newaxis]\n model_fluxes[:, :, ifilt] = conv.flux\n else:\n model_fluxes[:, ifilt] = conv.flux[:, 0]\n\n try:\n m.names = np.char.strip(conv.model_names)\n except:\n m.names = np.array([x.strip() for x in conv.model_names], dtype=conv.model_names.dtype)\n\n m.fluxes = model_fluxes\n\n if extended is not None:\n m.extended = extended\n\n return m\n\n @classmethod\n def _read_version_2(cls, directory, filters, distance_range=None, remove_resolved=None):\n\n m = cls()\n\n # Read in model parameters\n modpar = parfile.read(\"%s/models.conf\" % directory, 'conf')\n\n print(\" ------------------------------------------------------------\")\n print(\" => Model parameters\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n print(\" Models : %s\" % modpar['name'])\n print(\" Log[d] stepping : %g\" % modpar['logd_step'])\n\n if modpar['aperture_dependent']:\n\n distance_range_kpc = distance_range.to(u.kpc).value\n\n if distance_range:\n if distance_range_kpc[0] == distance_range_kpc[1]:\n n_distances = 1\n m.distances = np.array([distance_range_kpc[0]]) * u.kpc\n else:\n n_distances = 1 + (np.log10(distance_range_kpc[1]) - 
np.log10(distance_range_kpc[0])) / modpar['logd_step']\n m.distances = np.logspace(np.log10(distance_range_kpc[0]), np.log10(distance_range_kpc[1]), n_distances) * u.kpc\n print(\" Number of distances : %i\" % m.n_distances)\n else:\n raise Exception(\"For aperture-dependent models, a distange range is required\")\n\n print(\"\")\n print(\" ------------------------------------------------------------\")\n print(\" => Reading in convolved fluxes\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n\n # Start off by reading in main flux cube\n from .sed.cube import SEDCube\n cube = SEDCube.read(os.path.join(directory, 'flux.fits'))\n\n # Initialize model flux array and array to indicate whether models are\n # extended\n if m.n_distances is None:\n model_fluxes = np.zeros((cube.n_models, len(filters))) * u.mJy\n extended = None\n else:\n model_fluxes = np.zeros((cube.n_models, m.n_distances, len(filters))) * u.mJy\n extended = np.zeros((cube.n_models, m.n_distances, len(filters)), dtype=bool)\n\n # Define empty wavelength array\n m.wavelengths = np.zeros(len(filters)) * u.micron\n\n for ifilt, filt in enumerate(filters):\n\n if 'name' in filt:\n\n filename = '%s/convolved/%s.fits' % (directory, filt['name'])\n\n if not os.path.exists(filename):\n if os.path.exists(filename + '.gz'):\n filename += '.gz'\n else:\n raise Exception(\"File not found: \" + filename)\n\n print(\" Reading \" + filename)\n\n conv = ConvolvedFluxes.read(filename)\n\n m.wavelengths[ifilt] = conv.central_wavelength\n\n elif 'wav' in filt:\n\n # Find wavelength index\n wavelength_index = np.argmin(np.abs(cube.wav - filt['wav']))\n\n print(\" Reading fluxes at {0}\".format(filt['wav']))\n\n conv = MonochromaticFluxes.from_sed_cube(cube, wavelength_index)\n\n m.wavelengths[ifilt] = filt['wav']\n\n if m.n_distances is not None:\n apertures_au = filt['aperture_arcsec'] * m.distances.to(u.pc).value * u.au\n conv = conv.interpolate(apertures_au)\n conv.flux = conv.flux * (u.kpc / m.distances) ** 2\n m.logd = np.log10(m.distances.to(u.kpc).value)\n # TODO: rather than compute the radius for each model, just\n # check directly the condition.\n if remove_resolved:\n extended[:, :, ifilt] = apertures_au[np.newaxis,:] < conv.find_radius_sigma(0.5)[:, np.newaxis]\n model_fluxes[:, :, ifilt] = conv.flux\n else:\n model_fluxes[:, ifilt] = conv.flux[:, 0]\n\n try:\n m.names = np.char.strip(conv.model_names)\n except:\n m.names = np.array([x.strip() for x in conv.model_names], dtype=conv.model_names.dtype)\n\n m.fluxes = model_fluxes\n\n if extended is not None:\n m.extended = extended\n\n return m\n\n def fit(self, source, av_law, sc_law, av_min, av_max, output_convolved=False):\n\n weight, log_flux, log_error = source.get_log_fluxes()\n\n model_fluxes = self.log_fluxes_mJy\n\n if model_fluxes.ndim == 2: # Aperture-independent fitting\n\n # Use 2-parameter linear regression to find the best-fit av and scale for each model\n residual = log_flux - model_fluxes\n av_best, sc_best = f.linear_regression(residual, weight, av_law, sc_law)\n\n # Use optimal scaling for Avs that are outside range\n reset1 = (av_best < av_min)\n reset2 = (av_best > av_max)\n av_best[reset1] = av_min\n av_best[reset2] = av_max\n reset = reset1 | reset2\n sc_best[reset] = f.optimal_scaling(residual[reset] - av_best[reset][:, np.newaxis] * av_law[np.newaxis, :], weight, sc_law)\n\n # Compute best-fit model in each case\n model = av_best[:, np.newaxis] * av_law[np.newaxis, :] + sc_best[:, np.newaxis] * sc_law[np.newaxis,:]\n\n 
# Calculate the chi-squared value\n ch_best = f.chi_squared(source.valid, residual, log_error, weight, model)\n\n # Extract convolved model fluxes for best-fit\n model_fluxes = model + model_fluxes\n\n elif model_fluxes.ndim == 3: # Aperture dependent fitting\n\n # Use optimal scaling to fit the Av\n residual = log_flux - model_fluxes\n av_best = f.optimal_scaling(residual, weight, av_law)\n\n # Reset to valid range\n av_best[av_best < av_min] = av_min\n av_best[av_best > av_max] = av_max\n\n # Compute best-fit model in each case\n model = av_best[:, :, np.newaxis] * av_law[np.newaxis, np.newaxis,:]\n\n # Calculate the chi-squared value\n ch_best = f.chi_squared(source.valid, residual, log_error, weight, model)\n\n # Remove extended objects\n if type(self.extended) == np.ndarray:\n reset = np.any(self.extended[:, :, source.valid > 0], axis=2)\n ch_best[reset] = np.inf\n\n # Find best-fit distance in each case\n best = np.argmin(ch_best, axis=1)\n\n sc_best = self.logd[best]\n\n ch_best = ch_best[np.arange(self.n_models), best]\n av_best = av_best[np.arange(self.n_models), best]\n\n # Extract convolved model fluxes for best-fit\n model_fluxes = (model + model_fluxes)[np.arange(self.n_models), best, :]\n\n else:\n\n raise Exception(\"Unexpected number of dimensions in flux array\")\n\n info = FitInfo()\n info.source = source\n info.av = av_best\n info.sc = sc_best\n info.chi2 = ch_best\n info.model_name = self.names\n info.model_fluxes = model_fluxes\n info.sort()\n\n return info\n\n\ndef load_parameter_table(model_dir):\n\n if os.path.exists(model_dir + '/parameters.fits'):\n t = Table.read(model_dir + '/parameters.fits')\n elif os.path.exists(model_dir + '/parameters.fits.gz'):\n t = Table.read(model_dir + '/parameters.fits.gz')\n else:\n raise Exception(\"Parameter file not found in %s\" % model_dir)\n\n return t\n" ]
[ [ "numpy.zeros", "numpy.argmin", "numpy.any", "numpy.abs", "numpy.char.strip", "numpy.arange", "numpy.log10", "numpy.array" ] ]
Next-Trends/rasa
[ "c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7" ]
[ "rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py" ]
[ "from __future__ import annotations\nimport logging\nimport re\nfrom typing import Any, Dict, List, Optional, Text, Tuple, Type\nimport numpy as np\nimport scipy.sparse\nfrom rasa.nlu.tokenizers.tokenizer import Tokenizer\n\nimport rasa.shared.utils.io\nimport rasa.utils.io\nimport rasa.nlu.utils.pattern_utils as pattern_utils\nfrom rasa.engine.graph import ExecutionContext, GraphComponent\nfrom rasa.engine.recipes.default_recipe import DefaultV1Recipe\nfrom rasa.engine.storage.resource import Resource\nfrom rasa.engine.storage.storage import ModelStorage\nfrom rasa.nlu.constants import TOKENS_NAMES\nfrom rasa.nlu.featurizers.sparse_featurizer.sparse_featurizer import SparseFeaturizer\nfrom rasa.shared.nlu.constants import TEXT, RESPONSE, ACTION_TEXT\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\n DefaultV1Recipe.ComponentType.MESSAGE_FEATURIZER, is_trainable=True\n)\nclass RegexFeaturizer(SparseFeaturizer, GraphComponent):\n \"\"\"Adds message features based on regex expressions.\"\"\"\n\n @classmethod\n def required_components(cls) -> List[Type]:\n \"\"\"Components that should be included in the pipeline before this component.\"\"\"\n return [Tokenizer]\n\n @staticmethod\n def get_default_config() -> Dict[Text, Any]:\n \"\"\"Returns the component's default config.\"\"\"\n return {\n **SparseFeaturizer.get_default_config(),\n # text will be processed with case sensitive as default\n \"case_sensitive\": True,\n # use lookup tables to generate features\n \"use_lookup_tables\": True,\n # use regexes to generate features\n \"use_regexes\": True,\n # use match word boundaries for lookup table\n \"use_word_boundaries\": True,\n }\n\n def __init__(\n self,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n known_patterns: Optional[List[Dict[Text, Text]]] = None,\n ) -> None:\n \"\"\"Constructs new features for regexes and lookup table using regex expressions.\n\n Args:\n config: Configuration for the component.\n model_storage: Storage which graph components can use to persist and load\n themselves.\n resource: Resource locator for this component which can be used to persist\n and load itself from the `model_storage`.\n execution_context: Information about the current graph run.\n known_patterns: Regex Patterns the component should pre-load itself with.\n \"\"\"\n super().__init__(execution_context.node_name, config)\n\n self._model_storage = model_storage\n self._resource = resource\n\n self.known_patterns = known_patterns if known_patterns else []\n self.case_sensitive = config[\"case_sensitive\"]\n self.finetune_mode = execution_context.is_finetuning\n\n @classmethod\n def create(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> RegexFeaturizer:\n \"\"\"Creates a new untrained component (see parent class for full docstring).\"\"\"\n return cls(config, model_storage, resource, execution_context)\n\n def _merge_new_patterns(self, new_patterns: List[Dict[Text, Text]]) -> None:\n \"\"\"Updates already known patterns with new patterns extracted from data.\n\n New patterns should always be added to the end of the existing\n patterns and the order of the existing patterns should not be disturbed.\n\n Args:\n new_patterns: Patterns extracted from training data and to be merged with\n known patterns.\n 
\"\"\"\n pattern_name_index_map = {\n pattern[\"name\"]: index for index, pattern in enumerate(self.known_patterns)\n }\n for extra_pattern in new_patterns:\n new_pattern_name = extra_pattern[\"name\"]\n\n # Some patterns may have just new examples added\n # to them. These do not count as additional pattern.\n if new_pattern_name in pattern_name_index_map:\n self.known_patterns[pattern_name_index_map[new_pattern_name]][\n \"pattern\"\n ] = extra_pattern[\"pattern\"]\n else:\n self.known_patterns.append(extra_pattern)\n\n def train(self, training_data: TrainingData) -> Resource:\n \"\"\"Trains the component with all patterns extracted from training data.\"\"\"\n patterns_from_data = pattern_utils.extract_patterns(\n training_data,\n use_lookup_tables=self._config[\"use_lookup_tables\"],\n use_regexes=self._config[\"use_regexes\"],\n use_word_boundaries=self._config[\"use_word_boundaries\"],\n )\n if self.finetune_mode:\n # Merge patterns extracted from data with known patterns\n self._merge_new_patterns(patterns_from_data)\n else:\n self.known_patterns = patterns_from_data\n\n self._persist()\n return self._resource\n\n def process_training_data(self, training_data: TrainingData) -> TrainingData:\n \"\"\"Processes the training examples (see parent class for full docstring).\"\"\"\n for example in training_data.training_examples:\n for attribute in [TEXT, RESPONSE, ACTION_TEXT]:\n self._text_features_with_regex(example, attribute)\n\n return training_data\n\n def process(self, messages: List[Message]) -> List[Message]:\n \"\"\"Featurizes all given messages in-place.\n\n Returns:\n the given list of messages which have been modified in-place\n \"\"\"\n for message in messages:\n self._text_features_with_regex(message, TEXT)\n\n return messages\n\n def _text_features_with_regex(self, message: Message, attribute: Text) -> None:\n \"\"\"Helper method to extract features and set them appropriately in the message.\n\n Args:\n message: Message to be featurized.\n attribute: Attribute of message to be featurized.\n \"\"\"\n if self.known_patterns:\n sequence_features, sentence_features = self._features_for_patterns(\n message, attribute\n )\n\n self.add_features_to_message(\n sequence_features, sentence_features, attribute, message\n )\n\n def _features_for_patterns(\n self, message: Message, attribute: Text\n ) -> Tuple[Optional[scipy.sparse.coo_matrix], Optional[scipy.sparse.coo_matrix]]:\n \"\"\"Checks which known patterns match the message.\n\n Given a sentence, returns a vector of {1,0} values indicating which\n regexes did match. Furthermore, if the\n message is tokenized, the function will mark all tokens with a dict\n relating the name of the regex to whether it was matched.\n\n Args:\n message: Message to be featurized.\n attribute: Attribute of message to be featurized.\n\n Returns:\n Token and sentence level features of message attribute.\n \"\"\"\n # Attribute not set (e.g. 
response not present)\n if not message.get(attribute):\n return None, None\n\n tokens = message.get(TOKENS_NAMES[attribute], [])\n\n if not tokens:\n # nothing to featurize\n return None, None\n\n flags = 0 # default flag\n if not self.case_sensitive:\n flags = re.IGNORECASE\n\n sequence_length = len(tokens)\n\n num_patterns = len(self.known_patterns)\n\n sequence_features = np.zeros([sequence_length, num_patterns])\n sentence_features = np.zeros([1, num_patterns])\n\n for pattern_index, pattern in enumerate(self.known_patterns):\n matches = re.finditer(\n pattern[\"pattern\"], message.get(attribute), flags=flags\n )\n matches = list(matches)\n\n for token_index, t in enumerate(tokens):\n patterns = t.get(\"pattern\", default={})\n patterns[pattern[\"name\"]] = False\n\n for match in matches:\n if t.start < match.end() and t.end > match.start():\n patterns[pattern[\"name\"]] = True\n sequence_features[token_index][pattern_index] = 1.0\n if attribute in [RESPONSE, TEXT, ACTION_TEXT]:\n # sentence vector should contain all patterns\n sentence_features[0][pattern_index] = 1.0\n\n t.set(\"pattern\", patterns)\n\n return (\n scipy.sparse.coo_matrix(sequence_features),\n scipy.sparse.coo_matrix(sentence_features),\n )\n\n @classmethod\n def load(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n **kwargs: Any,\n ) -> RegexFeaturizer:\n \"\"\"Loads trained component (see parent class for full docstring).\"\"\"\n known_patterns = None\n\n try:\n with model_storage.read_from(resource) as model_dir:\n patterns_file_name = model_dir / \"patterns.pkl\"\n known_patterns = rasa.shared.utils.io.read_json_file(patterns_file_name)\n except (ValueError, FileNotFoundError):\n logger.warning(\n f\"Failed to load `{cls.__class__.__name__}` from model storage. \"\n f\"Resource '{resource.name}' doesn't exist.\"\n )\n\n return cls(\n config,\n model_storage,\n resource,\n execution_context,\n known_patterns=known_patterns,\n )\n\n def _persist(self) -> None:\n with self._model_storage.write_to(self._resource) as model_dir:\n regex_file = model_dir / \"patterns.pkl\"\n rasa.shared.utils.io.dump_obj_as_json_to_file(\n regex_file, self.known_patterns\n )\n\n @classmethod\n def validate_config(cls, config: Dict[Text, Any]) -> None:\n \"\"\"Validates that the component is configured properly.\"\"\"\n pass\n" ]
[ [ "numpy.zeros" ] ]
arnaudgelas/pytorch-lightning
[ "cc624358c8e396e966f9c51b3010f6a986047fc6" ]
[ "tests/models/test_restore.py" ]
[ "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport logging as log\nimport os\nimport pickle\nfrom copy import deepcopy\n\nimport cloudpickle\nimport pytest\nimport torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nimport tests.base.develop_pipelines as tpipes\nimport tests.base.develop_utils as tutils\nfrom pytorch_lightning import Callback, LightningModule, Trainer, seed_everything\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom tests.base import BoringModel, EvalModelTemplate, GenericEvalModelTemplate, TrialMNIST\n\n\nclass ModelTrainerPropertyParity(Callback):\n\n def _check_properties(self, trainer, pl_module):\n assert trainer.global_step == pl_module.global_step\n assert trainer.current_epoch == pl_module.current_epoch\n\n def on_train_start(self, trainer, pl_module):\n self._check_properties(trainer, pl_module)\n\n def on_train_batch_start(self, trainer, pl_module, *args, **kwargs):\n self._check_properties(trainer, pl_module)\n\n def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):\n self._check_properties(trainer, pl_module)\n\n def on_epoch_end(self, trainer, pl_module):\n self._check_properties(trainer, pl_module)\n\n def on_train_end(self, trainer, pl_module):\n self._check_properties(trainer, pl_module)\n\n\[email protected](\"enable_pl_optimizer\", [False, True])\ndef test_model_properties_resume_from_checkpoint(enable_pl_optimizer, tmpdir):\n \"\"\" Test that properties like `current_epoch` and `global_step`\n in model and trainer are always the same. 
\"\"\"\n model = EvalModelTemplate()\n checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer_args = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n logger=False,\n enable_pl_optimizer=enable_pl_optimizer,\n callbacks=[checkpoint_callback, ModelTrainerPropertyParity()], # this performs the assertions\n )\n trainer = Trainer(**trainer_args)\n trainer.fit(model)\n\n trainer_args.update(max_epochs=2)\n trainer = Trainer(**trainer_args, resume_from_checkpoint=str(tmpdir / \"last.ckpt\"))\n trainer.fit(model)\n\n\ndef test_try_resume_from_non_existing_checkpoint(tmpdir):\n \"\"\" Test that trying to resume from non-existing `resume_from_checkpoint` fail without error.\"\"\"\n model = BoringModel()\n checkpoint_cb = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n logger=False,\n callbacks=[checkpoint_cb],\n limit_train_batches=0.1,\n limit_val_batches=0.1,\n )\n # Generate checkpoint `last.ckpt` with BoringModel\n trainer.fit(model)\n # `True` if resume/restore successfully else `False`\n assert trainer.checkpoint_connector.restore(str(tmpdir / \"last.ckpt\"), trainer.on_gpu)\n assert not trainer.checkpoint_connector.restore(str(tmpdir / \"last_non_existing.ckpt\"), trainer.on_gpu)\n\n\nclass CaptureCallbacksBeforeTraining(Callback):\n callbacks = []\n\n def on_train_start(self, trainer, pl_module):\n self.callbacks = deepcopy(trainer.callbacks)\n\n\[email protected](\"enable_pl_optimizer\", [False, True])\ndef test_callbacks_state_resume_from_checkpoint(enable_pl_optimizer, tmpdir):\n \"\"\" Test that resuming from a checkpoint restores callbacks that persist state. \"\"\"\n model = EvalModelTemplate()\n callback_capture = CaptureCallbacksBeforeTraining()\n\n def get_trainer_args():\n checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer_args = dict(\n default_root_dir=tmpdir,\n max_steps=1,\n logger=False,\n enable_pl_optimizer=enable_pl_optimizer,\n callbacks=[\n checkpoint,\n callback_capture,\n ]\n )\n assert checkpoint.best_model_path == \"\"\n assert checkpoint.best_model_score is None\n return trainer_args\n\n # initial training\n trainer = Trainer(**get_trainer_args())\n trainer.fit(model)\n callbacks_before_resume = deepcopy(trainer.callbacks)\n\n # resumed training\n trainer = Trainer(**get_trainer_args(), resume_from_checkpoint=str(tmpdir / \"last.ckpt\"))\n trainer.fit(model)\n\n assert len(callbacks_before_resume) == len(callback_capture.callbacks)\n\n for before, after in zip(callbacks_before_resume, callback_capture.callbacks):\n if isinstance(before, ModelCheckpoint):\n assert before.best_model_path == after.best_model_path\n assert before.best_model_score == after.best_model_score\n\n\[email protected](\"enable_pl_optimizer\", [False, True])\ndef test_callbacks_references_resume_from_checkpoint(enable_pl_optimizer, tmpdir):\n \"\"\" Test that resuming from a checkpoint sets references as expected. 
\"\"\"\n model = EvalModelTemplate()\n args = {'default_root_dir': tmpdir, 'max_steps': 1, 'logger': False, \"enable_pl_optimizer\": enable_pl_optimizer}\n\n # initial training\n checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer = Trainer(**args, callbacks=[checkpoint])\n assert checkpoint is trainer.callbacks[0] is trainer.checkpoint_callback\n trainer.fit(model)\n\n # resumed training\n new_checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n # pass in a new checkpoint object, which should take\n # precedence over the one in the last.ckpt file\n trainer = Trainer(**args, callbacks=[new_checkpoint], resume_from_checkpoint=str(tmpdir / \"last.ckpt\"))\n assert checkpoint is not new_checkpoint\n assert new_checkpoint is trainer.callbacks[0] is trainer.checkpoint_callback\n trainer.fit(model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_running_test_pretrained_model_distrib_dp(tmpdir):\n \"\"\"Verify `test()` on pretrained model.\"\"\"\n tutils.set_random_master_port()\n\n model = EvalModelTemplate()\n\n # exp file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # exp file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[checkpoint],\n logger=logger,\n gpus=[0, 1],\n accelerator='dp',\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)\n\n # run test set\n new_trainer = Trainer(**trainer_options)\n results = new_trainer.test(pretrained_model)\n pretrained_model.cpu()\n\n # test we have good test accuracy\n acc = results[0]['test_acc']\n assert acc > 0.5, f\"Model failed to get expected {0.5} accuracy. 
test_acc = {acc}\"\n\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n tpipes.run_prediction(dataloader, pretrained_model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_running_test_pretrained_model_distrib_ddp_spawn(tmpdir):\n \"\"\"Verify `test()` on pretrained model.\"\"\"\n tutils.set_random_master_port()\n\n model = EvalModelTemplate()\n\n # exp file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # exp file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[checkpoint],\n logger=logger,\n gpus=[0, 1],\n accelerator='ddp_spawn',\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)\n\n # run test set\n new_trainer = Trainer(**trainer_options)\n results = new_trainer.test(pretrained_model)\n pretrained_model.cpu()\n\n acc = results[0]['test_acc']\n assert acc > 0.5, f\"Model failed to get expected {0.5} accuracy. test_acc = {acc}\"\n\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n tpipes.run_prediction(dataloader, pretrained_model)\n\n\ndef test_running_test_pretrained_model_cpu(tmpdir):\n \"\"\"Verify test() on pretrained model.\"\"\"\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # logger file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=3,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[checkpoint],\n logger=logger,\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)\n\n new_trainer = Trainer(**trainer_options)\n new_trainer.test(pretrained_model)\n\n # test we have good test accuracy\n tutils.assert_ok_model_acc(new_trainer)\n\n\[email protected]('model_template', [EvalModelTemplate, GenericEvalModelTemplate])\ndef test_load_model_from_checkpoint(tmpdir, model_template):\n \"\"\"Verify test() on pretrained model.\"\"\"\n hparams = model_template.get_default_hparams()\n model = model_template(**hparams)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_top_k=-1)],\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n trainer.test(ckpt_path=None)\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n\n # load last checkpoint\n last_checkpoint = sorted(glob.glob(os.path.join(trainer.checkpoint_callback.dirpath, \"*.ckpt\")))[-1]\n\n # Since `EvalModelTemplate` has 
`_save_hparams = True` by default, check that ckpt has hparams\n ckpt = torch.load(last_checkpoint)\n assert model_template.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), 'hyper_parameters missing from checkpoints'\n\n # Ensure that model can be correctly restored from checkpoint\n pretrained_model = model_template.load_from_checkpoint(last_checkpoint)\n\n # test that hparams loaded correctly\n for k, v in hparams.items():\n assert getattr(pretrained_model, k) == v\n\n # assert weights are the same\n for (old_name, old_p), (new_name, new_p) in zip(model.named_parameters(), pretrained_model.named_parameters()):\n assert torch.all(torch.eq(old_p, new_p)), 'loaded weights are not the same as the saved weights'\n\n # Check `test` on pretrained model:\n new_trainer = Trainer(**trainer_options)\n new_trainer.test(pretrained_model)\n\n # test we have good test accuracy\n tutils.assert_ok_model_acc(new_trainer)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_dp_resume(tmpdir):\n \"\"\"Make sure DP continues training correctly.\"\"\"\n hparams = EvalModelTemplate.get_default_hparams()\n model = EvalModelTemplate(**hparams)\n\n trainer_options = dict(max_epochs=1, gpus=2, accelerator='dp', default_root_dir=tmpdir)\n\n # get logger\n logger = tutils.get_default_logger(tmpdir)\n\n # exp file to get weights\n # logger file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n # add these to the trainer options\n trainer_options['logger'] = logger\n trainer_options['checkpoint_callback'] = checkpoint\n\n # fit model\n trainer = Trainer(**trainer_options)\n trainer.is_slurm_managing_tasks = True\n result = trainer.fit(model)\n\n # track epoch before saving. Increment since we finished the current epoch, don't want to rerun\n real_global_epoch = trainer.current_epoch + 1\n\n # correct result and ok accuracy\n assert result == 1, 'amp + dp model failed to complete'\n\n # ---------------------------\n # HPC LOAD/SAVE\n # ---------------------------\n # save\n trainer.checkpoint_connector.hpc_save(tmpdir, logger)\n\n # init new trainer\n new_logger = tutils.get_default_logger(tmpdir, version=logger.version)\n trainer_options['logger'] = new_logger\n trainer_options['checkpoint_callback'] = ModelCheckpoint(dirpath=tmpdir)\n trainer_options['limit_train_batches'] = 0.5\n trainer_options['limit_val_batches'] = 0.2\n trainer_options['max_epochs'] = 1\n new_trainer = Trainer(**trainer_options)\n\n # set the epoch start hook so we can predict before the model does the full training\n def assert_good_acc():\n assert new_trainer.current_epoch == real_global_epoch and new_trainer.current_epoch > 0\n\n # if model and state loaded correctly, predictions will be good even though we\n # haven't trained with the new loaded model\n dp_model = new_trainer.model\n dp_model.eval()\n\n dataloader = trainer.train_dataloader\n tpipes.run_prediction(dataloader, dp_model, dp=True)\n\n # new model\n model = EvalModelTemplate(**hparams)\n model.on_train_start = assert_good_acc\n\n # fit new model which should load hpc weights\n new_trainer.fit(model)\n\n # test freeze on gpu\n model.freeze()\n model.unfreeze()\n\n\ndef test_model_saving_loading(tmpdir):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n 
callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n default_root_dir=tmpdir,\n )\n result = trainer.fit(model)\n\n # traning complete\n assert result == 1, 'amp + ddp model failed to complete'\n\n # make a prediction\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n # generate preds before saving model\n model.eval()\n pred_before_saving = model(x)\n\n # save model\n new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)\n hparams_path = os.path.join(hparams_path, 'hparams.yaml')\n model_2 = EvalModelTemplate.load_from_checkpoint(checkpoint_path=new_weights_path, hparams_file=hparams_path,)\n model_2.eval()\n\n # make prediction\n # assert that both predictions are the same\n new_pred = model_2(x)\n assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1\n\n\[email protected]('url_ckpt', [True, False])\ndef test_strict_model_load_more_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv('TORCH_HOME', tmpdir)\n\n model = EvalModelTemplate()\n # Extra layer\n model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=1, logger=logger,\n callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n )\n result = trainer.fit(model)\n\n # traning complete\n assert result == 1\n\n # save model\n new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), 'hparams.yaml')\n hparams_url = f'http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}'\n ckpt_path = hparams_url if url_ckpt else new_weights_path\n\n EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False,\n )\n\n with pytest.raises(RuntimeError, match=r'Unexpected key\\(s\\) in state_dict: \"c_d3.weight\", \"c_d3.bias\"'):\n EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True,\n )\n\n\[email protected]('url_ckpt', [True, False])\ndef test_strict_model_load_less_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv('TORCH_HOME', tmpdir)\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=1, logger=logger,\n callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n )\n result = trainer.fit(model)\n\n # traning complete\n assert result == 1\n\n # save model\n new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), 'hparams.yaml')\n hparams_url = 
f'http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}'\n ckpt_path = hparams_url if url_ckpt else new_weights_path\n\n class CurrentModel(EvalModelTemplate):\n def __init__(self):\n super().__init__()\n self.c_d3 = torch.nn.Linear(7, 7)\n\n CurrentModel.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False,\n )\n\n with pytest.raises(RuntimeError, match=r'Missing key\\(s\\) in state_dict: \"c_d3.weight\", \"c_d3.bias\"'):\n CurrentModel.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True,\n )\n\n\ndef test_model_pickle(tmpdir):\n model = EvalModelTemplate()\n pickle.dumps(model)\n cloudpickle.dumps(model)\n" ]
[ [ "torch.eq", "torch.nn.Linear", "torch.load", "torch.cuda.device_count" ] ]
fanzhiyan/magenta
[ "622c47c19bb84c6f57b286ed03b738516b2f27d6" ]
[ "magenta/common/nade_test.py" ]
[ "# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for nade.\"\"\"\n\nfrom magenta.common.nade import Nade\nimport tensorflow as tf\n\n\nclass NadeTest(tf.test.TestCase):\n\n def testInternalBias(self):\n batch_size = 4\n num_hidden = 6\n num_dims = 8\n test_inputs = tf.random_normal(shape=(batch_size, num_dims))\n nade = Nade(num_dims, num_hidden, internal_bias=True)\n log_prob, cond_probs = nade.log_prob(test_inputs)\n sample, sample_prob = nade.sample(n=batch_size)\n with self.test_session() as sess:\n sess.run([tf.global_variables_initializer()])\n self.assertEqual(log_prob.eval().shape, (batch_size,))\n self.assertEqual(cond_probs.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample_prob.eval().shape, (batch_size,))\n\n def testExternalBias(self):\n batch_size = 4\n num_hidden = 6\n num_dims = 8\n test_inputs = tf.random_normal(shape=(batch_size, num_dims))\n test_b_enc = tf.random_normal(shape=(batch_size, num_hidden))\n test_b_dec = tf.random_normal(shape=(batch_size, num_dims))\n\n nade = Nade(num_dims, num_hidden)\n log_prob, cond_probs = nade.log_prob(test_inputs, test_b_enc, test_b_dec)\n sample, sample_prob = nade.sample(b_enc=test_b_enc, b_dec=test_b_dec)\n with self.test_session() as sess:\n sess.run([tf.global_variables_initializer()])\n self.assertEqual(log_prob.eval().shape, (batch_size,))\n self.assertEqual(cond_probs.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample_prob.eval().shape, (batch_size,))\n\n\nif __name__ == '__main__':\n tf.test.main()\n" ]
[ [ "tensorflow.random_normal", "tensorflow.test.main", "tensorflow.global_variables_initializer" ] ]
paxtonedgar/MisInfo
[ "81b32fa3e7d0d204feb83e10169093f45727a2ea" ]
[ "src/trainers/lstm_attn_trainer.py" ]
[ "\nimport torch\n\nimport numpy as np\n\nfrom src.trainers.base_trainer import BaseTrainer\nfrom src.evaluation.metrics import Metrics\n\n\nclass LSTMAttnTrainer(BaseTrainer):\n \"\"\"\n Trainer class. Optimizer is by default handled by BaseTrainer.\n \"\"\"\n def __init__(self, model, config):\n super(LSTMAttnTrainer, self).__init__(model, config)\n self._log_interval = config['log_interval']\n self._batch_size = config['dataloader_params']['batch_size']\n self._logger.info('Batch size: %d', self._batch_size)\n\n def _train_epoch(self, epoch, train_iter, dev_iter):\n \"\"\"\n :param epoch:\n :param train_iter:\n :param dev_iter:\n :return:\n \"\"\"\n # turn on training mode which enables dropout\n self._model.train()\n\n total_loss = 0\n predicted_values = []\n target_values = []\n\n labels = np.arange(self._model.num_classes)\n\n for batch_idx, batch in enumerate(train_iter):\n (data, lengths), target = self._to_tensor(batch.text, batch.label)\n\n self._optimizer.zero_grad()\n output, attn_w = self._model(data, lengths)\n # output = self._model(data, lengths)\n loss = self._loss_function(output, target, reduction='sum')\n loss.backward()\n self._optimizer.step()\n\n total_loss += loss.item()\n\n predictions = torch.max(output, 1)[1].view(target.size())\n predicted_values.extend(predictions.data.tolist())\n target_values.extend(target.data.tolist())\n\n if (batch_idx + 1) % self._log_interval == 0:\n results = Metrics.metrics(\n predicted_values, target_values, labels\n )\n self._logger.info(\n 'Epoch: {:3d} [{:5d}/{:5.0f} batches] '\n 'Current loss: {:5.6f}, Total average loss: {:5.6f}, '\n 'F-score: {:5.2f}'.format(\n epoch, (batch_idx + 1),\n len(train_iter.dataset) / self._batch_size,\n loss.item() / self._batch_size,\n total_loss / results['n_samples'],\n results['f_score']\n )\n )\n\n results_train = Metrics.metrics(predicted_values, target_values, labels)\n results_train['loss'] = total_loss / results_train['n_samples']\n results_val, _ = self.evaluate(dev_iter)\n\n log = {'epoch': epoch}\n log.update({'train_{}'.format(k): v for k, v in results_train.items()})\n log.update({'val_{}'.format(k): v for k, v in results_val.items()})\n\n return log\n\n def evaluate(self, data_iter):\n \"\"\"\n Validate after training an epoch\n :param data_iter:\n :return:\n \"\"\"\n # switch to evaluation mode (won't dropout)\n self._model.eval()\n\n total_loss = 0\n predicted_values = []\n target_values = []\n\n labels = np.arange(self._model.num_classes)\n\n with torch.no_grad():\n for batch_idx, batch in enumerate(data_iter):\n (data, lengths), target = self._to_tensor(\n batch.text, batch.label\n )\n\n output, attn_w = self._model(data, lengths)\n # output = self._model(data, lengths)\n loss = self._loss_function(output, target, reduction='sum')\n\n total_loss += loss.item()\n\n predictions = torch.max(output, 1)[1].view(target.size())\n predicted_values.extend(predictions.data.tolist())\n target_values.extend(target.data.tolist())\n\n results = Metrics.metrics(predicted_values, target_values, labels)\n results['loss'] = total_loss / results['n_samples']\n\n self._logger.info(\n 'Evaluation: Loss: {:5.6f}, F-score: {:5.2f}% ({}/{})'.format(\n results['loss'], results['f_score'],\n results['correct'], results['n_samples']\n )\n )\n\n return results, predicted_values\n" ]
[ [ "numpy.arange", "torch.no_grad", "torch.max" ] ]
RubenImhoff/Large_Sample_Nowcasting_Evaluation
[ "b2d8500261881a749a8f20815b7e2b0b9b69c4f7", "b2d8500261881a749a8f20815b7e2b0b9b69c4f7" ]
[ "HPCrunScripts/PS_DeterministicNowcast_parallel_advection_24h.py", "pysteps/pysteps/extrapolation/semilagrangian.py" ]
[ "# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 17 07:41:32 2019\n\nDeterministic nowcast with pySTEPS, with extraction of results per catchment. \nBased on the input data for the Ensemble nowcast, but without any ensembles. \n\nMake sure to change the initial part to your case.\n\nNote that this script assumes that the catchments are already reprojected.\n\nTO DO - add _reprojected to input and change this later on in the script.\n\n@author: imhof_rn\n\"\"\"\n\nfrom osgeo import gdal\nfrom osgeo import gdal_array\nfrom osgeo import ogr, osr\n\nimport os\nos.environ['PROJ_LIB'] = r'/u/imhof_rn/anaconda3/pkgs/proj4-5.2.0-h470a237_1/share/proj'\n\nimport mkl\nmkl.set_num_threads(1)\n\nimport datetime\nimport netCDF4\nimport numpy as np\nimport pprint\nimport sys\nimport time\n\nimport pysteps as stp\nimport config as cfg\n\nimport logging\nimport itertools\n\nlogging.basicConfig(level=logging.INFO)\n\n# import message passing interface for python\nfrom mpi4py import MPI\n\n# import for memory use\n#from pympler import tracker\n#tr = tracker.SummaryTracker()\n#tr.print_diff() \n\n###############################################################################\n#################\n# Initial part, only change this\n# NOTE: This script only works when the catchment shapefiles are already reprojected\n# to the KNMI radar dataset.\n#################\n\nos.chdir('/u/imhof_rn/pysteps-0.2')\n\n# Catchment filenames and directories\ncatchments = True # Put on false when you don't want any slicing for catchments (i.e. you will use the full output)\n# If catchments = 'False', uncomment the next two lines.\ncatchment_filenames = [\"/u/imhof_rn/GIS/Catchments_pysteps/Hupsel.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/stroomgebied_Regge.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/GroteWaterleiding.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Aa.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Reusel.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/het_molentje.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Luntersebeek.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Dwarsdiep.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/AfwaterendgebiedBoezemsysteem.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/HHRijnland.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Beemster.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/DeLinde.shp\"] # Put here the locations of the shapefiles\ncatchment_names = ['Hupsel', 'Regge', 'GroteWaterleiding', 'Aa', 'Reusel', 'Molentje', 'Luntersebeek', 'Dwarsdiep', 'Delfland', 'Rijnland', 'Beemster', 'Linde'] # A list of catchment names.\nout_dir = \"/u/imhof_rn/Nowcasts/pySTEPS\" # Just used for logging, the actual\n# out_dir is set in the pystepsrc-file.\n\n# Verification settings\nverification = {\n \"experiment_name\" : \"pysteps_mpi_24hours_deterministic\",\n \"overwrite\" : True, # to recompute nowcasts\n \"v_thresholds\" : [0.1, 1.0], # [mm/h] \n \"v_leadtimes\" : [10, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360], # [min]\n \"v_accu\" : None, # [min]\n \"seed\" : 42, # for reproducibility\n \"doplot\" : True, # save figures\n \"dosaveresults\" : True # save verification scores to csv\n}\n\n# Forecast settings\nforecast = {\n \"n_lead_times\" : 72, # timesteps per nowcast\n \"r_threshold\" : 0.1, # rain/no rain threshold [mm/h]\n \"unit\" : \"mm/h\", # mm/h or dBZ\n \"transformation\" : \"dB\", # None or dB \n \"adjust_domain\" : None # None or square\n}\n\n# The experiment set-up\n## this includes tuneable parameters\nexperiment = {\n ## the events event start event end update cycle data source\n 
\"data\" : [(\"200801101205\",\"200801111800\",5,\"knmi\"),\n (\"200801190305\",\"200801200900\",5,\"knmi\"),\n (\"200801191005\",\"200801201600\",5,\"knmi\"),\n (\"200801201705\",\"200801212300\",5,\"knmi\"),\n (\"200802042305\",\"200802060500\",5,\"knmi\"),\n (\"200807070605\",\"200807081200\",5,\"knmi\"),\n (\"200808070405\",\"200808081000\",5,\"knmi\"),\n (\"200812100305\",\"200812110900\",5,\"knmi\"),\n (\"200902091005\",\"200902101600\",5,\"knmi\"),\n (\"200905131705\",\"200905142300\",5,\"knmi\"),\n (\"200905161005\",\"200905171600\",5,\"knmi\"),\n (\"200912091805\",\"200912110000\",5,\"knmi\"),\n (\"201005110005\",\"201005120600\",5,\"knmi\"),\n (\"201006090205\",\"201006100800\",5,\"knmi\"),\n (\"201007101005\",\"201007111600\",5,\"knmi\"),\n (\"201007101105\",\"201007111700\",5,\"knmi\"),\n (\"201008251605\",\"201008262200\",5,\"knmi\"),\n (\"201008252105\",\"201008270300\",5,\"knmi\"),\n (\"201008252205\",\"201008270400\",5,\"knmi\"),\n (\"201008252305\",\"201008270500\",5,\"knmi\"),\n (\"201101120405\",\"201101131000\",5,\"knmi\"),\n (\"201106180405\",\"201106191000\",5,\"knmi\"),\n (\"201107131805\",\"201107150000\",5,\"knmi\"),\n (\"201107210105\",\"201107220700\",5,\"knmi\"),\n (\"201107231105\",\"201107241700\",5,\"knmi\"),\n (\"201107271805\",\"201107290000\",5,\"knmi\"),\n (\"201112151205\",\"201112161800\",5,\"knmi\"),\n (\"201112151305\",\"201112161900\",5,\"knmi\"),\n (\"201112311805\",\"201201020000\",5,\"knmi\"),\n (\"201112312105\",\"201201020300\",5,\"knmi\"),\n (\"201201010905\",\"201201021500\",5,\"knmi\"),\n (\"201201041205\",\"201201051800\",5,\"knmi\"),\n (\"201206120205\",\"201206130800\",5,\"knmi\"),\n (\"201207271505\",\"201207282100\",5,\"knmi\"),\n (\"201208010605\",\"201208021200\",5,\"knmi\"),\n (\"201212220305\",\"201212230900\",5,\"knmi\"),\n (\"201212220505\",\"201212231100\",5,\"knmi\"),\n (\"201212241705\",\"201212252300\",5,\"knmi\"),\n (\"201305200605\",\"201305211200\",5,\"knmi\"),\n (\"201312232205\",\"201312250400\",5,\"knmi\"),\n (\"201407080605\",\"201407091200\",5,\"knmi\"),\n (\"201407101205\",\"201407111800\",5,\"knmi\"),\n (\"201407270605\",\"201407281200\",5,\"knmi\"),\n (\"201407271905\",\"201407290100\",5,\"knmi\"),\n (\"201407280605\",\"201407291200\",5,\"knmi\"),\n (\"201412110705\",\"201412121300\",5,\"knmi\"),\n (\"201412110805\",\"201412121400\",5,\"knmi\"),\n (\"201412111205\",\"201412121800\",5,\"knmi\"),\n (\"201412261705\",\"201412272300\",5,\"knmi\"),\n (\"201501071705\",\"201501082300\",5,\"knmi\"),\n (\"201501120805\",\"201501131400\",5,\"knmi\"),\n (\"201501121005\",\"201501131600\",5,\"knmi\"),\n (\"201501121105\",\"201501131700\",5,\"knmi\"),\n (\"201502200805\",\"201502211400\",5,\"knmi\"),\n (\"201508160405\",\"201508171000\",5,\"knmi\"),\n (\"201511292305\",\"201512010500\",5,\"knmi\"),\n (\"201511300205\",\"201512010800\",5,\"knmi\"),\n (\"201601131405\",\"201601142000\",5,\"knmi\"),\n (\"201601291405\",\"201601302000\",5,\"knmi\"),\n (\"201602081205\",\"201602091800\",5,\"knmi\"),\n (\"201602081305\",\"201602091900\",5,\"knmi\"),\n (\"201603040205\",\"201603050800\",5,\"knmi\"),\n (\"201605220405\",\"201605231000\",5,\"knmi\"),\n (\"201605221505\",\"201605232100\",5,\"knmi\"),\n (\"201605312105\",\"201606020300\",5,\"knmi\"),\n (\"201605312305\",\"201606020500\",5,\"knmi\"),\n (\"201606031605\",\"201606042200\",5,\"knmi\"),\n (\"201607210705\",\"201607221300\",5,\"knmi\"),\n (\"201701120505\",\"201701131100\",5,\"knmi\"),\n (\"201701120805\",\"201701131400\",5,\"knmi\"),\n 
(\"201701121105\",\"201701131700\",5,\"knmi\"),\n (\"201702212105\",\"201702230300\",5,\"knmi\"),\n (\"201706271405\",\"201706282000\",5,\"knmi\"),\n (\"201707231505\",\"201707242100\",5,\"knmi\"),\n (\"201708100005\",\"201708110600\",5,\"knmi\"),\n (\"201708291205\",\"201708301800\",5,\"knmi\"),\n (\"201708291605\",\"201708302200\",5,\"knmi\"),\n (\"201712080205\",\"201712090800\",5,\"knmi\"),\n (\"201712130805\",\"201712141400\",5,\"knmi\"),\n (\"201712301705\",\"201712312300\",5,\"knmi\"),\n (\"201805310605\",\"201806011200\",5,\"knmi\"),\n (\"201812081205\",\"201812091800\",5,\"knmi\")],\n \n ## the methods\n \"oflow_method\" : [\"lucaskanade\"], # lucaskanade, darts\n \"adv_method\" : [\"semilagrangian\"], # semilagrangian, eulerian\n \"nwc_method\" : [\"extrapolation\"],\n \"noise_method\" : [None], # parametric, nonparametric, ssft\n \"decomp_method\" : [\"fft\"],\n \n ## the parameters\n \"n_ens_members\" : [1],\n \"ar_order\" : [2],\n \"n_cascade_levels\" : [8],\n \"noise_adjustment\" : [False],\n \"conditional\" : [False],\n \"precip_mask\" : [True],\n \"mask_method\" : [\"sprog\"], # obs, incremental, sprog\n \"prob_matching\" : [\"mean\"],\n \"num_workers\" : [1], # Set the number of processors available for parallel computing\n \"vel_pert_method\" : [None], # No velocity pertubation in order to allow for deterministic run following Seed et al. [2003]\n}\n\n# End of initial part\n###############################################################################\n\nstart_time = time.time()\n\n#### HERE ALL AVAILABLE PROCESSES AT START-UP TIME ARE COLLECTED IN comm\n#### SEE FOR MORE INFO ON MPI: https://www.cs.earlham.edu/~lemanal/slides/mpi-slides.pdf \ncomm = MPI.COMM_WORLD\nrank = comm.rank\nsize = comm.size\n\nlogging.info(('I am process rank {}'.format(rank)))\n\n#########################################################\n# Open the catchment shapes - They're needed later for the catchment_slice utils\n#########################################################\nshapes = []\n\nfor i in range(0, len(catchment_filenames)):\n shape_filename = catchment_filenames[i]\n \n # set file names in order to obtain the reprojected shapefile, which \n # was made with the catchment_medata functionality.\n dirname = os.path.dirname(shape_filename)\n basename = os.path.basename(shape_filename)\n basenametxt = os.path.splitext(basename)[0]\n shapes_reprojected = os.path.join(dirname, basenametxt+'_Reprojected.shp')\t\n \n driver = ogr.GetDriverByName('ESRI Shapefile')\n shapes.append(driver.Open(shapes_reprojected))\n\n###########\n# Set some first functions\n###########\n\n## define the callback function to export the nowcast to netcdf\nconverter = stp.utils.get_method(\"mm/h\")\ndef export(X_3D):\n \"\"\"\n X_3D 3D forecast consisting of (lead time, h, w)\n \"\"\"\n\n ## Open the array for lead time t and convert to mm/h\n X,_ = converter(X_3D, metadata)\n # readjust to initial domain shape\n X,_ = reshaper(X, metadata, inverse=True)\n\n # Then, slice the array per catchment or not if no catchments are given\n if catchments == True:\n X_catchment = stp.utils.catchment_slice_mpi(X, shapes)\n # Export to netCDF per catchment\n for n in range(0, len(catchment_filenames)):\n key = list(d.keys())[n]\n stp.io.export_forecast_dataset(np.array([X_catchment[n]]), d[key])\n else:\n # We have to change the 2D array to a 3D array (with just 1 ens member)\n X = np.array([X])\n # else, export full radar nowcast to netcdf\n stp.io.export_forecast_dataset(X, exporter)\n \n X = None\n\n# Conditional 
parameters\n## parameters that can be directly related to other parameters\ndef cond_pars(pars):\n for key in list(pars):\n if key == \"oflow_method\":\n if pars[key].lower() == \"darts\": pars[\"n_prvs_times\"] = 9\n else: pars[\"n_prvs_times\"] = 3\n elif key.lower() == \"n_cascade_levels\":\n if pars[key] == 1 : pars[\"bandpass_filter\"] = \"uniform\"\n else: pars[\"bandpass_filter\"] = \"gaussian\"\n elif key.lower() == \"nwc_method\":\n if pars[key] == \"extrapolation\" : pars[\"n_ens_members\"] = 1\n return pars\n\n#########\n# Make list of parameters (i.e. the different dates - all other parameters are\n# the same for every run) and scatter these over the nodes.\n#########\n \n# Prepare the list of all parameter sets of the verification\nparsets = [[]]\nfor _, items in experiment.items():\n parsets = [parset+[item] for parset in parsets for item in items]\n\nif rank == 0:\n #### Reorganize work a bit so we can scatter it\n keyfunc = lambda x:x[0] % size\n work = itertools.groupby(sorted(enumerate(parsets), key=keyfunc), keyfunc)\n \n #### Expand the work so we get lists of row, col per node\n workpernode = [[x[1] for x in val] for (key, val) in work]\nelse:\n workpernode = None\n\n#### NOW DISTRIBUTE THE WORK\nworkpernode = comm.scatter(workpernode, root=0)\n\nlogging.info(\"Got the following work in process rank {} : {}\".format(rank, workpernode))\n\n#### Each node can now do it's own work. The main advantage is that we can do a gather at the end to collect all results.\n#### Keep track of all the runs per node in scores\n#scores = []\n\n#### before starting any runs, make sure that you know in which folder we run this MPI run routine. \n#### Always return to this folder before the next run\n#curdir = os.getcwd()\nos.chdir('/u/imhof_rn/pysteps-master')\n\n###########\n# Run the model in parallel\n###########\n\n# Now loop all parameter sets\nfor n, parset in enumerate(workpernode):\n# logging.info(\"rank %02.f computing scores for parameter set nr %04.f\" % (rank, n))\n runId = '%s_%04.f' % (out_dir, n)\n \n # Build parameter set\n \n p = {}\n for m, key in enumerate(experiment.keys()):\n p[key] = parset[m]\n ## apply conditional parameters\n p = cond_pars(p)\n ## include all remaining parameters\n p.update(verification)\n p.update(forecast)\n \n# print(\"************************\")\n# print(\"* Parameter set %02d/%02d: *\" % (n+1, len(parsets)))\n# print(\"************************\")\n \n# pprint.pprint(p)\n \n # If necessary, build path to results\n path_to_experiment = os.path.join(cfg.path_outputs, p[\"experiment_name\"])\n # subdir with event date\n path_to_nwc = os.path.join(path_to_experiment, '-'.join([p[\"data\"][0], p[\"data\"][3]]))\n# for key, item in p.items():\n#\t\t# include only variables that change\n# if len(experiment.get(key,[None])) > 1 and key.lower() is not \"data\":\n# path_to_nwc = os.path.join(path_to_nwc, '-'.join([key, str(item)]))\n try:\n os.makedirs(path_to_nwc)\n except OSError:\n pass\n \n # **************************************************************************\n # NOWCASTING\n # ************************************************************************** \n \n # Loop forecasts within given event using the prescribed update cycle interval\n\n ## import data specifications\n ds = cfg.get_specifications(p[\"data\"][3])\n \n if p[\"v_accu\"] is None:\n p[\"v_accu\"] = ds.timestep\n \n # Loop forecasts for given event\n startdate = datetime.datetime.strptime(p[\"data\"][0], \"%Y%m%d%H%M\")\n enddate = datetime.datetime.strptime(p[\"data\"][1], 
\"%Y%m%d%H%M\")\n countnwc = 0\n while startdate <= enddate:\n try:\n \n # filename of the nowcast netcdf. Set name either per catchment or as \n # total nowcast for the entire radar image.\n if catchments == True:\n outfn = []\n for n in range(0, len(catchment_names)):\n path_to_catchment = os.path.join(path_to_nwc, catchment_names[n])\n try:\n os.makedirs(path_to_catchment)\n Name = os.path.join(path_to_catchment, \"%s_nowcast.netcdf\" % startdate.strftime(\"%Y%m%d%H%M\"))\n outfn.append(Name)\n except OSError:\n print(\"Catchment outfile directory does already exist for starttime: %s\" % startdate.strftime(\"%Y%m%d%H%M\"))\n Name = os.path.join(path_to_catchment, \"%s_nowcast.netcdf\" % startdate.strftime(\"%Y%m%d%H%M\"))\n outfn.append(Name)\n else:\n outfn = os.path.join(path_to_nwc, \"%s_nowcast.netcdf\" % startdate.strftime(\"%Y%m%d%H%M\"))\n \n ## check if results already exists\n if catchments == True:\n run_exist = False\n if os.path.isfile(outfn[n]):\n fid = netCDF4.Dataset(outfn[n], 'r')\n if fid.dimensions[\"time\"].size == p[\"n_lead_times\"]:\n run_exist = True\n if p[\"overwrite\"]:\n os.remove(outfn[n])\n run_exist = False \n else:\n os.remove(outfn[n])\n else:\n run_exist = False\n if os.path.isfile(outfn):\n fid = netCDF4.Dataset(outfn, 'r')\n if fid.dimensions[\"time\"].size == p[\"n_lead_times\"]:\n run_exist = True\n if p[\"overwrite\"]:\n os.remove(outfn)\n run_exist = False \n else:\n os.remove(outfn)\n \n if run_exist:\n print(\"Nowcast %s_nowcast already exists in %s\" % (startdate.strftime(\"%Y%m%d%H%M\"),path_to_nwc))\n \n else:\n countnwc += 1\n print(\"Computing the nowcast (%02d) ...\" % countnwc)\n \n print(\"Starttime: %s\" % startdate.strftime(\"%Y%m%d%H%M\"))\n \n ## redirect stdout to log file\n logfn = os.path.join(path_to_nwc, \"%s_log.txt\" % startdate.strftime(\"%Y%m%d%H%M\")) \n print(\"Log: %s\" % logfn)\n orig_stdout = sys.stdout\n f = open(logfn, 'w')\n sys.stdout = f\n \n print(\"*******************\")\n print(\"* %s *****\" % startdate.strftime(\"%Y%m%d%H%M\"))\n print(\"* Parameter set : *\")\n # pprint.pprint(p) \n print(\"*******************\")\n \n print(\"--- Start of the run : %s ---\" % (datetime.datetime.now()))\n \n ## time\n t0 = time.time()\n \n # Read inputs\n # print(\"Read the data...\")\n \n ## find radar field filenames\n input_files = stp.io.find_by_date(startdate, ds.root_path, ds.path_fmt, ds.fn_pattern,\n ds.fn_ext, ds.timestep, p[\"n_prvs_times\"])\n \n \n ## read radar field files\n importer = stp.io.get_method(ds.importer, type=\"importer\")\n R, _, metadata = stp.io.read_timeseries(input_files, importer, **ds.importer_kwargs)\n metadata0 = metadata.copy()\n metadata0[\"shape\"] = R.shape[1:]\n \n # Prepare input files\n # print(\"Prepare the data...\")\n \n ## if requested, make sure we work with a square domain\n reshaper = stp.utils.get_method(p[\"adjust_domain\"])\n R, metadata = reshaper(R, metadata)\n \n ## if necessary, convert to rain rates [mm/h] \n converter = stp.utils.get_method(\"mm/h\")\n R, metadata = converter(R, metadata)\n \n ## threshold the data\n R[R < p[\"r_threshold\"]] = 0.0\n metadata[\"threshold\"] = p[\"r_threshold\"]\n \n ## convert the data\n converter = stp.utils.get_method(p[\"unit\"])\n R, metadata = converter(R, metadata)\n \n ## transform the data\n transformer = stp.utils.get_method(p[\"transformation\"])\n R, metadata = transformer(R, metadata)\n \n ## set NaN equal to zero\n R[~np.isfinite(R)] = metadata[\"zerovalue\"]\n \n # Compute motion field\n oflow_method = 
stp.motion.get_method(p[\"oflow_method\"])\n UV = oflow_method(R)\n \n #####\n # Perform the nowcast \n #####\n \n ## initialize netcdf file\n incremental = \"timestep\" if p[\"nwc_method\"].lower() == \"steps\" else None\n if catchments == True:\n metadata_new = stp.utils.catchment_metadata_mpi(shapes, metadata0)\n d = {} \n for n in range(0, len(catchment_filenames)):\n d[\"exporter_{0}\".format(n)] = stp.io.initialize_forecast_exporter_netcdf(outfn[n], startdate,\n ds.timestep, p[\"n_lead_times\"], metadata_new[n][\"shape\"], \n p[\"n_ens_members\"], metadata_new[n], incremental=incremental)\n else:\n exporter = stp.io.initialize_forecast_exporter_netcdf(outfn, startdate,\n ds.timestep, p[\"n_lead_times\"], metadata0[\"shape\"], \n p[\"n_ens_members\"], metadata0, incremental=incremental)\n \n ## start the nowcast\n nwc_method = stp.nowcasts.get_method(p[\"nwc_method\"])\n R_fct = nwc_method(R[-1,:,:], UV, p[\"n_lead_times\"], extrap_method=p[\"adv_method\"])\n \n print(R_fct.shape[0])\n \n export(R_fct)\n \n ## save results, either per catchment or in total\n if catchments == True:\n for n in range(0, len(catchment_filenames)):\n key = list(d.keys())[n]\n stp.io.close_forecast_file(d[key])\n else:\n stp.io.close_forecast_file(exporter)\n R_fct = None\n \n # save log\n print(\"--- End of the run : %s ---\" % (datetime.datetime.now()))\n print(\"--- Total time : %s seconds ---\" % (time.time() - t0))\n sys.stdout = orig_stdout\n f.close()\n \n # next forecast\n startdate += datetime.timedelta(minutes = p[\"data\"][2])\n \n except ValueError:\n print('ValueError')\n # next forecast\n startdate += datetime.timedelta(minutes = p[\"data\"][2])\n\n# tr.print_diff()\n# scores.append(n)\n #### RETURN TO THE CORRECT DIRECTORY, JUST IN CASE SOMETHING WAS CHANGED...\n os.chdir('/u/imhof_rn/pysteps-master')\n\n#### Wait here so we can collect all runs\n#### Because we distributed the work evenly all processes should be here at approximately the same time\ncomm.Barrier()\n#### Great, we're all here. Now let's gather the scores...\n#### Collect values from all the processes in the main root\n#scores = comm.gather(scores, root=0)\n\n#logging.debug(\"Rank {} has scores {}\".format(rank, scores))\n \nend_time = time.time()\n\nprint('Total process took', (end_time - start_time)/3600.0, 'hours') ", "\"\"\"Implementation of the semi-Lagrangian method of Germann et al (2002).\"\"\"\n\nimport numpy as np\nimport scipy.ndimage.interpolation as ip\nimport time\n\ndef extrapolate(R, V, num_timesteps, outval=np.nan, **kwargs):\n \"\"\"Apply semi-Lagrangian extrapolation to a two-dimensional precipitation\n field.\n\n Parameters\n ----------\n R : array-like\n Array of shape (m,n) containing the input precipitation field. All\n values are required to be finite.\n V : array-like\n Array of shape (2,m,n) containing the x- and y-components of the m*n\n advection field. All values are required to be finite.\n num_timesteps : int\n Number of time steps to extrapolate.\n outval : float\n Optional argument for specifying the value for pixels advected from\n outside the domain. 
If outval is set to 'min', the value is taken as\n the minimum value of R.\n Default : np.nan\n\n Other Parameters\n ----------------\n D_prev : array-like\n Optional initial displacement vector field of shape (2,m,n) for the\n extrapolation.\n Default : None\n n_iter : int\n Number of inner iterations in the semi-Lagrangian scheme.\n Default : 3\n inverse : bool\n If True, the extrapolation trajectory is computed backward along the\n flow (default), forward otherwise.\n Default : True\n return_displacement : bool\n If True, return the total advection velocity (displacement) between the\n initial input field and the advected one integrated along the trajectory.\n Default : False\n\n Returns\n -------\n out : array or tuple\n If return_displacement=False, return a time series extrapolated fields of\n shape (num_timesteps,m,n). Otherwise, return a tuple containing the\n extrapolated fields and the total displacement along the advection trajectory.\n\n References\n ----------\n :cite:`GZ2002`\n\n \"\"\"\n if len(R.shape) != 2:\n raise ValueError(\"R must be a two-dimensional array\")\n\n if len(V.shape) != 3:\n raise ValueError(\"V must be a three-dimensional array\")\n\n if np.any(~np.isfinite(R)):\n raise ValueError(\"R contains non-finite values\")\n\n if np.any(~np.isfinite(V)):\n raise ValueError(\"V contains non-finite values\")\n\n # defaults\n verbose = kwargs.get(\"verbose\", False)\n D_prev = kwargs.get(\"D_prev\", None)\n n_iter = kwargs.get(\"n_iter\", 3)\n inverse = kwargs.get(\"inverse\", True)\n return_displacement = kwargs.get(\"return_displacement\", False)\n\n if verbose:\n print(\"Computing the advection with the semi-lagrangian scheme.\")\n t0 = time.time()\n\n if outval == \"min\":\n outval = np.nanmin(R)\n\n coeff = 1.0 if not inverse else -1.0\n\n X,Y = np.meshgrid(np.arange(V.shape[2]), np.arange(V.shape[1]))\n XY = np.stack([X, Y])\n\n R_e = []\n if D_prev is None:\n D = np.zeros((2, V.shape[1], V.shape[2]))\n else:\n D = D_prev.copy()\n\n for t in range(num_timesteps):\n V_inc = np.zeros(D.shape)\n\n for k in range(n_iter):\n if t > 0 or k > 0 or D_prev is not None:\n XYW = XY + D - V_inc / 2.0\n XYW = [XYW[1, :, :], XYW[0, :, :]]\n\n VWX = ip.map_coordinates(V[0, :, :], XYW, mode=\"nearest\", order=0,\n prefilter=False)\n VWY = ip.map_coordinates(V[1, :, :], XYW, mode=\"nearest\", order=0,\n prefilter=False)\n else:\n VWX = V[0, :, :]\n VWY = V[1, :, :]\n\n V_inc[0, :, :] = VWX / n_iter\n V_inc[1, :, :] = VWY / n_iter\n\n D += coeff * V_inc\n\n XYW = XY + D\n XYW = [XYW[1, :, :], XYW[0, :, :]]\n\n IW = ip.map_coordinates(R, XYW, mode=\"constant\", cval=outval, order=0,\n prefilter=False)\n R_e.append(np.reshape(IW, R.shape))\n\n if verbose:\n print(\"--- %s seconds ---\" % (time.time() - t0))\n\n if not return_displacement:\n return np.stack(R_e)\n else:\n return np.stack(R_e), D\n" ]
[ [ "numpy.array", "numpy.isfinite" ], [ "numpy.zeros", "numpy.reshape", "numpy.nanmin", "numpy.arange", "numpy.stack", "scipy.ndimage.interpolation.map_coordinates", "numpy.isfinite" ] ]
xiaohanhuang/pytorch
[ "a31aea8eaa99a5ff72b5d002c206cd68d5467a5e", "a31aea8eaa99a5ff72b5d002c206cd68d5467a5e" ]
[ "test/ao/sparsity/test_pruner.py", "test/fx2trt/converters/acc_op/test_getitem.py" ]
[ "# -*- coding: utf-8 -*-\n# Owner(s): [\"module: unknown\"]\n\n\nimport copy\nimport logging\n\nimport torch\nfrom torch import nn\nfrom torch.ao.sparsity import BasePruner, PruningParametrization, ZeroesParametrization\nfrom torch.nn.utils import parametrize\n\nfrom torch.testing._internal.common_utils import TestCase\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nDEVICES = {\n torch.device(\"cpu\"),\n torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n}\n\nNEEDS_ZEROS = { # these layers should have pruned indices zero-ed, not removed\n nn.BatchNorm2d\n}\n\n\nclass Linear(nn.Module):\n r\"\"\"Model with Linear layers, in Sequential and outside, without biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(16, 16, bias=False)\n )\n self.linear = nn.Linear(16, 16, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass LinearB(nn.Module):\n r\"\"\"Model with Linear layers, in Sequential and outside, with biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(16, 16, bias=True)\n )\n self.linear = nn.Linear(16, 16, bias=True)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass MultipleLinear(nn.Module):\n r\"\"\"Model with multiple Linear layers, in Sequential and outside, without biases\n and with activation functions\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(7, 5, bias=False),\n nn.ReLU(),\n nn.Linear(5, 8, bias=False),\n nn.ReLU(),\n nn.Linear(8, 6, bias=False)\n )\n self.linear = nn.Linear(6, 4, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass MultipleLinearB(nn.Module):\n r\"\"\"Model with multiple Linear layers, in Sequential and outside, with biases\n and with activation functions\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(7, 5, bias=True),\n nn.ReLU(),\n nn.Linear(5, 8, bias=True),\n nn.ReLU(),\n nn.Linear(8, 6, bias=True)\n )\n self.linear = nn.Linear(6, 4, bias=True)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass MultipleLinearMixed(nn.Module):\n r\"\"\"Model with multiple Linear layers, in Sequential and outside, some with biases\n and with activation functions\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(7, 5, bias=True),\n nn.ReLU(),\n nn.Linear(5, 8, bias=False),\n nn.ReLU(),\n nn.Linear(8, 6, bias=True)\n )\n self.linear = nn.Linear(6, 4, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass Conv2dA(nn.Module):\n r\"\"\"Model with Conv2d layers, in Sequential and outside, without biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=False),\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n return x\n\n\nclass Conv2dB(nn.Module):\n r\"\"\"Model with Conv2d layers, in Sequential and outside, with biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=True),\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=True)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n return x\n\n\nclass Conv2dC(nn.Module):\n r\"\"\"Model with Conv2d layers, in Sequential and 
outside, with and without biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=True),\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n return x\n\n\nclass Conv2dBN(nn.Module):\n r\"\"\"Model with Conv2d layers and BatchNorms\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=True),\n nn.BatchNorm2d(32)\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=True)\n self.bn = nn.BatchNorm2d(64)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n x = self.bn(x)\n return x\n\n\nclass SimplePruner(BasePruner):\n def update_mask(self, layer, **kwargs):\n layer.parametrizations.weight[0].pruned_outputs.add(1)\n\n\nclass MultiplePruner(BasePruner):\n def update_mask(self, layer, **kwargs):\n layer.parametrizations.weight[0].pruned_outputs.update([1, 2])\n\n\nclass TestBasePruner(TestCase):\n def _check_pruner_prepared(self, model, pruner, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n # Check mask exists\n assert hasattr(module, 'mask')\n # Check parametrization exists and is correct\n assert parametrize.is_parametrized(module)\n assert hasattr(module, \"parametrizations\")\n # Assume that this is the 1st/only parametrization\n if isinstance(module, tuple(NEEDS_ZEROS)):\n assert type(module.parametrizations.weight[0]) == ZeroesParametrization\n else:\n assert type(module.parametrizations.weight[0]) == PruningParametrization\n\n def _check_pruner_mask_squashed(self, model, pruner, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n assert not hasattr(module, \"parametrizations\")\n assert not hasattr(module, 'mask')\n\n def _check_pruner_valid_before_step(self, model, pruner, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n assert module.parametrizations.weight[0].pruned_outputs == set()\n\n def _check_pruner_valid_after_step(self, model, pruner, pruned_set, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n assert module.parametrizations.weight[0].pruned_outputs == pruned_set\n\n def _test_constructor_on_device(self, model, device):\n self.assertRaisesRegex(TypeError, 'BasePruner .* update_mask',\n BasePruner)\n model1 = copy.deepcopy(model).to(device)\n pruner = SimplePruner(None)\n pruner.prepare(model1, None)\n for g in pruner.module_groups:\n module = g['module']\n assert module.weight.device.type == device.type\n assert len(pruner.module_groups) == 2\n pruner.step()\n # Can instantiate the model with configs\n model2 = 
copy.deepcopy(model).to(device)\n pruner = SimplePruner({'test': 3})\n pruner.prepare(model2, [model2.linear])\n assert len(pruner.module_groups) == 1\n assert pruner.module_groups[0]['fqn'] == 'linear'\n assert 'test' in pruner.module_groups[0]\n assert pruner.module_groups[0]['test'] == 3\n\n def test_constructor(self):\n model = Linear()\n for device in DEVICES:\n self._test_constructor_on_device(model, torch.device(device))\n\n def _test_prepare_linear_on_device(self, model, device):\n model = copy.deepcopy(model).to(device)\n x = torch.ones(128, 16, device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, None)\n self._check_pruner_prepared(model, pruner, device)\n assert model(x).shape == (128, 16)\n\n def test_prepare_linear(self):\n models = [Linear(), LinearB()] # without and with bias\n for device in DEVICES:\n for model in models:\n self._test_prepare_linear_on_device(model, torch.device(device))\n\n def _test_prepare_conv2d_on_device(self, model, config, device):\n x = torch.ones((1, 1, 28, 28), device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, config)\n self._check_pruner_prepared(model, pruner, device)\n assert model(x).shape == (1, 64, 24, 24)\n\n def test_prepare_conv2d(self):\n bn_model = Conv2dBN()\n bn_config = [(bn_model.seq[0], bn_model.seq[1]), (bn_model.conv2d, bn_model.bn)]\n\n models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]\n configs = [None, None, None, bn_config]\n for device in DEVICES:\n for model, config in zip(models, configs):\n model = model.to(device)\n self._test_prepare_conv2d_on_device(model, config, torch.device(device))\n\n def _test_squash_mask_linear_on_device(self, model, device):\n model = copy.deepcopy(model).to(device)\n x = torch.ones(128, 16, device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, None)\n pruner.squash_mask()\n self._check_pruner_mask_squashed(model, pruner, device)\n assert model(x).shape == (128, 16)\n\n def test_squash_mask_linear(self):\n models = [Linear(), LinearB()] # without and with bias\n for device in DEVICES:\n for model in models:\n self._test_squash_mask_linear_on_device(model, torch.device(device))\n\n def _test_squash_mask_conv2d_on_device(self, model, config, device):\n model = copy.deepcopy(model).to(device)\n x = torch.ones((1, 1, 28, 28), device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, config)\n pruner.squash_mask()\n self._check_pruner_mask_squashed(model, pruner, device)\n assert model(x).shape == (1, 64, 24, 24)\n\n def test_squash_mask_conv2d(self):\n bn_model = Conv2dBN()\n bn_config = [(bn_model.seq[0], bn_model.seq[1]), (bn_model.conv2d, bn_model.bn)]\n\n models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]\n configs = [None, None, None, bn_config]\n for device in DEVICES:\n for model, config in zip(models, configs):\n model = model.to(device)\n self._test_squash_mask_conv2d_on_device(model, config, torch.device(device))\n\n def _test_step_linear_on_device(self, model, is_basic, device):\n model = model.to(device)\n if is_basic:\n x = torch.ones(16, 16)\n pruner = SimplePruner(None)\n pruner.prepare(model, None)\n self._check_pruner_valid_before_step(model, pruner, device)\n pruner.step()\n self._check_pruner_valid_after_step(model, pruner, {1}, device)\n else:\n x = torch.ones(7, 7)\n pruner = MultiplePruner(None)\n pruner.prepare(model, None)\n self._check_pruner_valid_before_step(model, pruner, device)\n pruner.step()\n self._check_pruner_valid_after_step(model, pruner, {1, 2}, device)\n\n def test_step_linear(self):\n 
basic_models = [Linear(), LinearB()]\n complex_models = [MultipleLinear(), MultipleLinearB(), MultipleLinearMixed()]\n for device in DEVICES:\n for model in basic_models:\n self._test_step_linear_on_device(model, True, torch.device(device))\n for model in complex_models:\n self._test_step_linear_on_device(model, False, torch.device(device))\n\n def _test_step_conv2d_on_device(self, model, config, device):\n model = model.to(device)\n x = torch.ones((1, 1, 28, 28)).to(device)\n pruner = SimplePruner(None)\n pruner.prepare(model, config)\n self._check_pruner_valid_before_step(model, pruner, device)\n pruner.step()\n if type(model) is Conv2dBN:\n assert pruner.get_module_pruned_outputs(model.seq[1]) == pruner.get_module_pruned_outputs(model.seq[0])\n assert pruner.get_module_pruned_outputs(model.bn) == pruner.get_module_pruned_outputs(model.conv2d)\n self._check_pruner_valid_after_step(model, pruner, {1}, device)\n assert model(x).shape == (1, 64, 24, 24)\n\n def test_step_conv2d(self):\n bn_model = Conv2dBN()\n bn_config = [(bn_model.seq[0], bn_model.seq[1]),\n (bn_model.conv2d, bn_model.bn)]\n\n models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]\n configs = [None, None, None, None, bn_config]\n for device in DEVICES:\n for model, config in zip(models, configs):\n self._test_step_conv2d_on_device(model, config, torch.device(device))\n", "# Owner(s): [\"oncall: fx\"]\n\nimport torch\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nimport torch.nn as nn\nfrom torch.testing._internal.common_fx2trt import AccTestCase\nfrom parameterized import parameterized\n\n\nclass TestGetitemConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"slice_batch_dim\", slice(None, None, None)),\n (\"slice_basic\", (slice(None, None, None), slice(0, 3, 2))),\n (\"slice_full\", (slice(None, None, None), slice(0, 10, 3))),\n (\"ellipsis\", (slice(None, None, None), ..., slice(0, 3, 2))),\n (\n \"slice_all_none\",\n (slice(None, None, None), slice(None, None, None)),\n ),\n (\n \"slice_start_none\",\n (slice(None, None, None), slice(None, 2, 1)),\n ),\n (\"slice_end_none\", (slice(None, None, None), slice(1, None, 1))),\n (\n \"slice_step_none\",\n (slice(None, None, None), slice(0, 3, None)),\n ),\n (\"slice_neg_idx\", (slice(None, None, None), -1)),\n (\"slice_neg_slice\", (slice(None, None, None), slice(-8, -2, 3))),\n (\"multi_dim\", (slice(None, None, None), 0, 1)),\n (\n \"slice_multi_dim\",\n (slice(None, None, None), slice(0, 3, 2), slice(1, -1, 3)),\n ),\n (\n \"none\",\n (slice(None, None, None), None, slice(1, -1, 3), 1),\n ),\n ]\n )\n def test_getitem(self, _, idx):\n class Getitem(nn.Module):\n def __init__(self, idx):\n super().__init__()\n self.idx = idx\n\n def forward(self, x):\n x = x + x\n return x[self.idx]\n\n inputs = [torch.randn(2, 10, 10, 10)]\n self.run_test(Getitem(idx), inputs, expected_ops={acc_ops.getitem})\n" ]
[ [ "torch.nn.BatchNorm2d", "torch.ones", "torch.nn.Linear", "torch.nn.ReLU", "torch.cuda.is_available", "torch.nn.Conv2d", "torch.nn.utils.parametrize.is_parametrized", "torch.device" ], [ "torch.randn" ] ]
RobinRojowiec/intent-recognition-in-doctor-patient-interviews
[ "b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078" ]
[ "models/siamese_neural_network.py" ]
[ "import random\n\nimport torch\nimport torch.nn as nn\n\nfrom models.cnn_layer import CNNLayer\nfrom utility.model_parameter import ModelParameter, Configuration\n\n\nclass SiameseNeuralNetwork(nn.Module):\n def __init__(self, config: Configuration, label_count=64, device=torch.device('cpu'), *args, **kwargs):\n super(SiameseNeuralNetwork, self).__init__()\n\n # set parameters\n self.max_length = config.get_int(ModelParameter.MAX_LENGTH)\n self.device = device\n\n # create and initialize layers\n self.cnn_layer = CNNLayer(config)\n self.distance_measure = nn.CosineSimilarity()\n\n def distance(self, a, b):\n return self.distance_measure(a, b)\n\n def get_output_dim(self):\n return self.cnn_layer.get_output_length()\n\n def forward(self, sample, previous_classes, positions, previous_sample, sample_pos, *sample_neg, **kwargs):\n n_negative = len(sample_neg)\n selected_negative = sample_neg[random.randint(0, n_negative - 1)]\n return self.compare(sample, sample_pos, mode=kwargs[\"mode\"]), self.compare(sample, selected_negative,\n mode=kwargs[\"mode\"])\n\n def get_features(self, sample):\n return self.cnn_layer(sample)\n\n def compare(self, sample_1, sample_2, mode=\"train\", **kwargs):\n encoded_sample_1 = self.cnn_layer(sample_1)\n encoded_sample_2 = self.cnn_layer(sample_2)\n\n return self.distance(encoded_sample_1, encoded_sample_2)\n" ]
[ [ "torch.device", "torch.nn.CosineSimilarity" ] ]
ujjaldas132/models
[ "e13441ed200ce1bb204977e731508748bd0e0d14" ]
[ "official/recommendation/ncf_test.py" ]
[ "# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests NCF.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport unittest\n\nimport mock\nimport numpy as np\nimport tensorflow as tf\n\nfrom official.recommendation import constants as rconst\nfrom official.recommendation import data_pipeline\nfrom official.recommendation import neumf_model\nfrom official.recommendation import ncf_common\nfrom official.recommendation import ncf_estimator_main\nfrom official.recommendation import ncf_keras_main\nfrom official.utils.misc import keras_utils\nfrom official.utils.testing import integration\n\nfrom tensorflow.python.eager import context # pylint: disable=ungrouped-imports\n\n\nNUM_TRAIN_NEG = 4\n\n\nclass NcfTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls): # pylint: disable=invalid-name\n super(NcfTest, cls).setUpClass()\n ncf_common.define_ncf_flags()\n\n def setUp(self):\n self.top_k_old = rconst.TOP_K\n self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES\n rconst.NUM_EVAL_NEGATIVES = 2\n\n def tearDown(self):\n rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old\n rconst.TOP_K = self.top_k_old\n\n @unittest.skipIf(keras_utils.is_v2_0(), \"TODO(b/136018594)\")\n def get_hit_rate_and_ndcg(self, predicted_scores_by_user, items_by_user,\n top_k=rconst.TOP_K, match_mlperf=False):\n rconst.TOP_K = top_k\n rconst.NUM_EVAL_NEGATIVES = predicted_scores_by_user.shape[1] - 1\n batch_size = items_by_user.shape[0]\n\n users = np.repeat(np.arange(batch_size)[:, np.newaxis],\n rconst.NUM_EVAL_NEGATIVES + 1, axis=1)\n users, items, duplicate_mask = \\\n data_pipeline.BaseDataConstructor._assemble_eval_batch(\n users, items_by_user[:, -1:], items_by_user[:, :-1], batch_size)\n\n g = tf.Graph()\n with g.as_default():\n logits = tf.convert_to_tensor(\n predicted_scores_by_user.reshape((-1, 1)), tf.float32)\n softmax_logits = tf.concat([tf.zeros(logits.shape, dtype=logits.dtype),\n logits], axis=1)\n duplicate_mask = tf.convert_to_tensor(duplicate_mask, tf.float32)\n\n metric_ops = neumf_model._get_estimator_spec_with_metrics(\n logits=logits, softmax_logits=softmax_logits,\n duplicate_mask=duplicate_mask, num_training_neg=NUM_TRAIN_NEG,\n match_mlperf=match_mlperf).eval_metric_ops\n\n hr = metric_ops[rconst.HR_KEY]\n ndcg = metric_ops[rconst.NDCG_KEY]\n\n init = [tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.local_variables_initializer()]\n\n with self.session(graph=g) as sess:\n sess.run(init)\n return sess.run([hr[1], ndcg[1]])\n\n def test_hit_rate_and_ndcg(self):\n # Test with no duplicate items\n predictions = np.array([\n [2., 0., 1.], # In top 2\n [1., 0., 2.], # In top 1\n [2., 1., 0.], # In top 3\n [3., 4., 2.] 
# In top 3\n ])\n items = np.array([\n [2, 3, 1],\n [3, 1, 2],\n [2, 1, 3],\n [1, 3, 2],\n ])\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n # Test with duplicate items. In the MLPerf case, we treat the duplicates as\n # a single item. Otherwise, we treat the duplicates as separate items.\n predictions = np.array([\n [2., 2., 3., 1.], # In top 4. MLPerf: In top 3\n [1., 0., 2., 3.], # In top 1. MLPerf: In top 1\n [2., 3., 2., 0.], # In top 4. MLPerf: In top 3\n [2., 4., 2., 3.] # In top 2. MLPerf: In top 2\n ])\n items = np.array([\n [2, 2, 3, 1],\n [2, 3, 4, 1],\n [2, 3, 2, 1],\n [3, 2, 1, 4],\n ])\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(5)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n _BASE_END_TO_END_FLAGS = ['-batch_size', '1044', '-train_epochs', '1']\n\n @unittest.skipIf(keras_utils.is_v2_0(), \"TODO(b/136018594)\")\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_estimator(self):\n integration.run_synthetic(\n ncf_estimator_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS)\n\n @unittest.skipIf(keras_utils.is_v2_0(), 
\"TODO(b/136018594)\")\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_estimator_mlperf(self):\n integration.run_synthetic(\n ncf_estimator_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-ml_perf', 'True'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_keras_no_dist_strat(self):\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS +\n ['-distribution_strategy', 'off'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_dist_strat(self):\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_dist_strat_ctl(self):\n flags = (self._BASE_END_TO_END_FLAGS +\n ['-num_gpus', '0'] +\n ['-keras_use_ctl', 'True'])\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=flags)\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_1_gpu_dist_strat(self):\n if context.num_gpus() < 1:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(1, context.num_gpus()))\n\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '1'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_2_gpu(self):\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(2, context.num_gpus()))\n\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '2'])\n\nif __name__ == \"__main__\":\n tf.test.main()\n" ]
[ [ "tensorflow.zeros", "tensorflow.test.main", "tensorflow.compat.v1.local_variables_initializer", "tensorflow.Graph", "numpy.arange", "tensorflow.compat.v1.global_variables_initializer", "tensorflow.convert_to_tensor", "numpy.array", "tensorflow.python.eager.context.num_gpus" ] ]