repo_name (string, 6–130 chars) | hexsha (sequence) | file_path (sequence) | code (sequence) | apis (sequence) | possible_versions (list)
---|---|---|---|---|---
gunho1123/models | [
"a5d63cd3c28e30476ea53f1a5c3d13370926054d"
] | [
"official/projects/yolact/modeling/yolact_model.py"
] | [
"# Copyright 2021 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Build simclr models.\"\"\"\nfrom typing import List, Optional, Mapping\nfrom absl import logging\n\nimport tensorflow as tf\n\nfrom official.vision.beta.ops import anchor\n\nlayers = tf.keras.layers\n\n\[email protected]_keras_serializable(package='yolact')\nclass YolactModel(tf.keras.Model):\n \"\"\"A classification model based on SimCLR framework.\"\"\"\n\n def __init__(self,\n backbone: tf.keras.models.Model,\n decoder: tf.keras.models.Model,\n prediction_head: tf.keras.layers.Layer,\n protonet: tf.keras.layers.Layer,\n detection_generator: tf.keras.layers.Layer,\n min_level: int,\n max_level: int,\n num_scales: int,\n aspect_ratios: List[float],\n anchor_size: float,\n **kwargs):\n \"\"\"A classification model based on SimCLR framework.\n Args:\n backbone: a backbone network.\n input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.\n mode: `str` indicates mode of training to be executed.\n backbone_trainable: `bool` whether the backbone is trainable or not.\n **kwargs: keyword arguments to be passed.\n \"\"\"\n super(YolactModel, self).__init__(**kwargs)\n self._config_dict = {\n 'backbone': backbone,\n 'decoder': decoder,\n 'prediction_head': prediction_head,\n 'protonet': protonet,\n 'detection_generator': detection_generator,\n 'min_level': min_level,\n 'max_level': max_level,\n 'num_scales': num_scales,\n 'aspect_ratios': aspect_ratios,\n 'anchor_size': anchor_size,\n }\n self.backbone = backbone\n self.decoder = decoder\n self.prediction_head = prediction_head\n self.protonet = protonet\n self.detection_generator = detection_generator\n\n def call(self,\n inputs: tf.Tensor,\n image_shape: Optional[tf.Tensor] = None,\n anchor_boxes: Optional[Mapping[str, tf.Tensor]] = None,\n training=None,\n **kwargs):\n \"\"\"Forward pass of the YOLACT model.\n\n Args:\n images: `Tensor`, the input batched images, whose shape is\n [batch, height, width, 3].\n image_shape: `Tensor`, the actual shape of the input images, whose shape\n is [batch, 2] where the last dimension is [height, width]. Note that\n this is the actual image shape excluding paddings. 
For example, images\n in the batch may be resized into different shapes before padding to the\n fixed size.\n anchor_boxes: a dict of tensors which includes multilevel anchors.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the anchor coordinates of a particular feature\n level, whose shape is [height_l, width_l, num_anchors_per_location].\n training: `bool`, indicating whether it is in training mode.\n\n Returns:\n scores: a dict of tensors which includes scores of the predictions.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the box scores predicted from a particular feature\n level, whose shape is\n [batch, height_l, width_l, num_classes * num_anchors_per_location].\n boxes: a dict of tensors which includes coordinates of the predictions.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the box coordinates predicted from a particular\n feature level, whose shape is\n [batch, height_l, width_l, 4 * num_anchors_per_location].\n masks: a dict of tensors which includes mask coefficients of the predictions.\n - key: `str`, the level of the multilevel predictions.\n - values: `Tensor`, the mask coefficients predicted from a particular\n feature level, whose shape is\n [batch, height_l, width_l, k * num_anchors_per_location].\n protonet_features: `Tensor`, the protonet features, whose shape is\n [batch, height_2, width_2, k].\n \"\"\"\n outputs = {}\n backbone_features = self.backbone(inputs)\n decoder_features = self.decoder(backbone_features)\n levels = sorted(decoder_features.keys()) # Ascending order\n\n raw_scores, raw_boxes, raw_masks = self.prediction_head(decoder_features)\n protonet_features = self.protonet(decoder_features[levels[0]])\n\n if training:\n outputs.update({\n 'cls_outputs': raw_scores,\n 'box_outputs': raw_boxes,\n 'mask_outputs': raw_masks,\n 'protonet_features': protonet_features,\n })\n else:\n # Generate anchor boxes for this batch if not provided.\n if anchor_boxes is None:\n _, image_height, image_width, _ = inputs.get_shape().as_list()\n anchor_boxes = anchor.Anchor(\n min_level=self._config_dict['min_level'],\n max_level=self._config_dict['max_level'],\n num_scales=self._config_dict['num_scales'],\n aspect_ratios=self._config_dict['aspect_ratios'],\n anchor_size=self._config_dict['anchor_size'],\n image_size=(image_height, image_width)).multilevel_boxes\n for l in anchor_boxes:\n anchor_boxes[l] = tf.tile(\n tf.expand_dims(anchor_boxes[l], axis=0),\n [tf.shape(inputs)[0], 1, 1, 1])\n # Post-processing.\n raw_attributes = {\n 'raw_masks' : raw_masks,\n }\n final_results = self.detection_generator(raw_boxes, raw_scores,\n anchor_boxes, image_shape,\n raw_attributes)\n\n outputs.update({\n 'cls_outputs': raw_scores,\n 'box_outputs': raw_boxes,\n 'mask_outputs': raw_masks,\n 'protonet_features': protonet_features,\n })\n\n outputs.update({\n 'detection_boxes': final_results['detection_boxes'],\n 'detection_scores': final_results['detection_scores'],\n 'detection_classes': final_results['detection_classes'],\n 'num_detections': final_results['num_detections']\n })\n\n final_mask_coefficients = final_results['detection_attributes']['raw_masks']\n batch, proto_height, proto_width, proto_channel = protonet_features.get_shape().as_list()\n protonet_features = tf.reshape(protonet_features, [batch, -1, proto_channel]) # [batch, H*W, 32]\n assembled_masks = tf.matmul(\n a=protonet_features, # [batch, proto_height*proto_width, proto_channel]\n b=final_mask_coefficients, # [batch, 
max_num_instances, 32]\n transpose_b=True)\n assembled_masks = tf.reshape(assembled_masks, [batch, proto_height, proto_width, -1])\n assembled_masks = tf.transpose(assembled_masks, perm=[0,3,1,2])\n # [batch, max_num_instances, proto_height, proto_width]\n\n outputs['detection_masks'] = assembled_masks\n\n \n return outputs\n\n @property\n def checkpoint_items(self):\n \"\"\"Returns a dictionary of items to be additionally checkpointed.\"\"\"\n items = dict(backbone=self.backbone)\n items.update(decoder=self.decoder)\n return items\n\n def get_config(self):\n return self._config_dict\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n return cls(**config)\n"
] | [
[
"tensorflow.matmul",
"tensorflow.transpose",
"tensorflow.shape",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.keras.utils.register_keras_serializable"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
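
The yolact_model.py row above assembles final instance masks by linearly combining the protonet's prototype maps with per-detection mask coefficients. Below is a minimal, self-contained sketch of that matmul/reshape/transpose step; the shapes (batch 2, a 138×138 prototype grid, k=32 prototypes, 100 detections) are illustrative assumptions, not values taken from the row.

```python
import tensorflow as tf

# Hypothetical shapes for illustration; the real values come from the
# protonet output and the detection generator.
batch, proto_h, proto_w, k = 2, 138, 138, 32
max_num_instances = 100

protonet_features = tf.random.normal([batch, proto_h, proto_w, k])
mask_coefficients = tf.random.normal([batch, max_num_instances, k])

# Flatten the spatial grid so every prototype pixel is a length-k vector.
flat_protos = tf.reshape(protonet_features, [batch, -1, k])  # [batch, H*W, k]

# Each instance mask is a linear combination of the k prototype maps.
masks = tf.matmul(flat_protos, mask_coefficients, transpose_b=True)  # [batch, H*W, N]
masks = tf.reshape(masks, [batch, proto_h, proto_w, -1])
masks = tf.transpose(masks, perm=[0, 3, 1, 2])  # [batch, N, H, W]
print(masks.shape)  # (2, 100, 138, 138)
```
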
derekeverett/stat_model_surrogates | [
"48b140fe89ac1ffa5a7d5733337a5fa519f95d6f"
] | [
"Bayesian_NN_regression/nn_grid_search.py"
] | [
"import pickle\nimport sklearn\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom warnings import filterwarnings\nfilterwarnings('ignore')\n\nimport seaborn as sns\nsns.set()\nfrom pandas.plotting import scatter_matrix\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.preprocessing import scale\nfrom sklearn.model_selection import train_test_split\n\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\n\nfrom calculations_load import *\nfrom configurations import *\n\n# Get all the observables list\n\nnobs = 0\nobservables = []\nobs_name = []\n\nfor obs, cent_list in obs_cent_list['Pb-Pb-2760'].items():\n if obs not in active_obs_list['Pb-Pb-2760']:\n continue\n observables.append(obs)\n n = np.array(cent_list).shape[0]\n for i in cent_list:\n obs_name.append(f'{obs}_{i}')\n #self._slices[obs] = slice(self.nobs, self.nobs + n)\n nobs += n\n\nsystem_str = 'Pb-Pb-2760'\ndesign_file = 'production_designs/500pts/design_pts_Pb_Pb_2760_production/design_points_main_PbPb-2760.dat'\ndesign = pd.read_csv(design_file)\ndesign = design.drop(\"idx\", axis=1)\n\n#delete bad design points\ndrop_indices = list(delete_design_pts_set)\ndesign = design.drop(drop_indices)\n\n#choose features (inputs)\n#feature_cols = ['norm', 'trento_p'] #specific choices\nfeature_cols = design.keys().values #all of them\nn_features = len(feature_cols)\n\nX = design[feature_cols]\n\nn_design = SystemsInfo[\"Pb-Pb-2760\"][\"n_design\"]\nnpt = n_design - len(delete_design_pts_set)\nobs = 'dNch_deta' #choose the observable we want to emulate\n\nY = np.array([])\n\nfor pt in range(npt):\n for obs in active_obs_list['Pb-Pb-2760']:\n Y = np.append( Y, trimmed_model_data[system_str][pt, idf][obs]['mean'][:], axis=0)\nY = Y.reshape(X.shape[0], -1)\n\n\nprint( \"X.shape : \"+ str(X.shape) )\nprint( \"Y.shape : \"+ str(Y.shape) )\n\n#Scaling the inputs\n\nX_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=.2)\n\n#X_scaler = StandardScaler().fit(X_train)\n#Y_scaler = StandardScaler().fit(Y_train)\nX_scaler = MinMaxScaler(feature_range=(-1, 1)).fit(X_train)\nY_scaler = MinMaxScaler(feature_range=(-1, 1)).fit(Y_train)\n\nX_train_sc = X_scaler.transform(X_train)\nX_test_sc = X_scaler.transform(X_test)\n\nY_train_sc = Y_scaler.transform(Y_train)\nY_test_sc = Y_scaler.transform(Y_test)\n\n\n#Building NN model\n\nfrom keras.models import Model\nfrom keras.layers import Flatten, Input, Dense, Dropout ,Conv1D\ndef model_fn(ly1_units=20,activation_1='sigmoid',activation_2='tanh',ly2_units=20,activation_3='tanh',\\\n dropout_rate1 = 0.1,dropout_rate2 = 0.1,loss_fn=\"huber_loss\", krnl_sz=5,\\\n optimizer='adam'):\n inputs = Input(shape=(X.shape[1],1))\n x = Dense(ly1_units, activation=activation_1)(inputs)\n # print(x.shape)\n x= Conv1D(filters=1,kernel_size=krnl_sz)(x)\n x= Flatten()(x)\n x = Dropout(dropout_rate1)(x, training=True)\n x = Dense(ly2_units, activation=activation_2)(x)\n x = Dropout(dropout_rate2)(x, training=True)\n x = Dense(Y.shape[1], activation=activation_3)(x)\n outputs = x\n model = Model(inputs, outputs)\n#model.compile(loss=\"mean_squared_error\", optimizer='adam')\n model.compile(loss=loss_fn, optimizer=optimizer)\n model.summary()\n return model\n\n\n#initiate models\n\nmodel=model_fn()\n\n#reshape inputs\n\ntrain_tf_X=np.expand_dims(X_train_sc,axis=2)\n\n#Grid search\n\nfrom sklearn.model_selection import 
GridSearchCV\nly_units=[50,100,200,500]\nactivation_1=['sigmoid','tanh']\nactivation_2=['linear','tanh']\ndropout_rate = [0.2, 0.3, 0.5]\nkrnl_sz=[5,10,20,40]\nloss_fn=[\"mse\",\"huber_loss\"]\noptimizer=['adam']\nbatch_size=[10, 20, 50]\nestimator=tf.keras.wrappers.scikit_learn.KerasRegressor(build_fn=model_fn)\nparam_grid = dict(ly1_units=ly_units,ly2_units=ly_units,activation_1=activation_1, activation_2=activation_1,\\\n activation_3=activation_2,dropout_rate1=dropout_rate,dropout_rate2=dropout_rate,\\\n loss_fn=loss_fn,optimizer=optimizer,batch_size=batch_size,krnl_sz=krnl_sz)\ngrid = GridSearchCV(estimator=estimator, param_grid=param_grid, n_jobs=-1, cv=2, scoring='r2',verbose=20)\ngrid_result = grid.fit(train_tf_X, Y_train_sc,epochs=300,verbose=0)\n\nprint(f'The best set of hyperparameters are {grid_result.best_params_}')\n\n# Use a context manager so the pickle file is flushed and closed.\nwith open('grid_search_results.pkl', 'wb') as file:\n pickle.dump(grid_result, file)\n"
] | [
[
"sklearn.model_selection.GridSearchCV",
"numpy.expand_dims",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.wrappers.scikit_learn.KerasRegressor",
"numpy.append",
"numpy.array",
"sklearn.preprocessing.MinMaxScaler"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
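
The nn_grid_search.py row drives hyperparameter search by adapting a Keras model to scikit-learn's estimator API. Here is a condensed sketch of the same pattern on toy data; it assumes a TensorFlow release that still ships tf.keras.wrappers.scikit_learn.KerasRegressor (the wrapper was removed in later versions, where scikeras.wrappers.KerasRegressor is the maintained replacement), and the layer sizes and grid values are illustrative only.

```python
import numpy as np
import tensorflow as tf
from sklearn.model_selection import GridSearchCV

def build_model(units=32, activation="tanh"):
    # build_fn must return a freshly compiled model for every refit.
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(units, activation=activation, input_shape=(4,)),
        tf.keras.layers.Dense(1),
    ])
    model.compile(loss="mse", optimizer="adam")
    return model

X, y = np.random.rand(64, 4), np.random.rand(64, 1)

# The wrapper exposes get_params/set_params so GridSearchCV can clone it
# once per hyperparameter combination and cross-validation fold.
estimator = tf.keras.wrappers.scikit_learn.KerasRegressor(
    build_fn=build_model, epochs=5, verbose=0
)
grid = GridSearchCV(
    estimator,
    param_grid={"units": [16, 32], "activation": ["tanh", "relu"]},
    cv=2,
    scoring="r2",
)
grid.fit(X, y)
print(grid.best_params_)
```
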
themantalope/MONAI | [
"f398298b5aadc076102261a687a158f6ac17ad1c",
"f398298b5aadc076102261a687a158f6ac17ad1c",
"9378e52b9c2283fa71cf8572b08f274071753053",
"f398298b5aadc076102261a687a158f6ac17ad1c",
"f398298b5aadc076102261a687a158f6ac17ad1c"
] | [
"tests/test_patch_wsi_dataset.py",
"tests/test_shift_intensityd.py",
"tests/test_rotate90.py",
"tests/test_integration_classification_2d.py",
"monai/engines/workflow.py"
] | [
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\nfrom unittest import skipUnless\n\nimport numpy as np\nfrom numpy.testing import assert_array_equal\nfrom parameterized import parameterized\n\nfrom monai.apps.pathology.data import PatchWSIDataset\nfrom monai.apps.utils import download_url\nfrom monai.utils import optional_import\n\n_, has_cim = optional_import(\"cucim\")\n_, has_osl = optional_import(\"openslide\")\n\nFILE_URL = \"http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff\"\nFILE_PATH = os.path.join(os.path.dirname(__file__), \"testing_data\", \"temp_\" + os.path.basename(FILE_URL))\n\nTEST_CASE_0 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [1]},\n ],\n \"region_size\": (1, 1),\n \"grid_shape\": (1, 1),\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_1 = [\n {\n \"data\": [{\"image\": FILE_PATH, \"location\": [10004, 20004], \"label\": [0, 0, 0, 1]}],\n \"region_size\": (8, 8),\n \"grid_shape\": (2, 2),\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_2 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [1]},\n ],\n \"region_size\": 1,\n \"grid_shape\": 1,\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_3 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [[[0, 1], [1, 0]]]},\n ],\n \"region_size\": 1,\n \"grid_shape\": 1,\n \"patch_size\": 1,\n \"image_reader_name\": \"cuCIM\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[0, 1], [1, 0]]])},\n ],\n]\n\nTEST_CASE_OPENSLIDE_0 = [\n {\n \"data\": [\n {\"image\": FILE_PATH, \"location\": [0, 0], \"label\": [1]},\n ],\n \"region_size\": (1, 1),\n \"grid_shape\": (1, 1),\n \"patch_size\": 1,\n \"image_reader_name\": \"OpenSlide\",\n },\n [\n {\"image\": np.array([[[239]], [[239]], [[239]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\nTEST_CASE_OPENSLIDE_1 = [\n {\n \"data\": [{\"image\": FILE_PATH, \"location\": [10004, 20004], \"label\": [0, 0, 0, 1]}],\n \"region_size\": (8, 8),\n \"grid_shape\": (2, 2),\n \"patch_size\": 1,\n \"image_reader_name\": \"OpenSlide\",\n },\n [\n {\"image\": np.array([[[247]], [[245]], [[248]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n 
{\"image\": np.array([[[245]], [[247]], [[244]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[0]]])},\n {\"image\": np.array([[[246]], [[246]], [[246]]], dtype=np.uint8), \"label\": np.array([[[1]]])},\n ],\n]\n\n\nclass TestPatchWSIDataset(unittest.TestCase):\n def setUp(self):\n download_url(FILE_URL, FILE_PATH, \"5a3cfd4fd725c50578ddb80b517b759f\")\n\n @parameterized.expand(\n [\n TEST_CASE_0,\n TEST_CASE_1,\n TEST_CASE_2,\n TEST_CASE_3,\n ]\n )\n @skipUnless(has_cim, \"Requires CuCIM\")\n def test_read_patches_cucim(self, input_parameters, expected):\n dataset = PatchWSIDataset(**input_parameters)\n samples = dataset[0]\n for i in range(len(samples)):\n self.assertTupleEqual(samples[i][\"label\"].shape, expected[i][\"label\"].shape)\n self.assertTupleEqual(samples[i][\"image\"].shape, expected[i][\"image\"].shape)\n self.assertIsNone(assert_array_equal(samples[i][\"label\"], expected[i][\"label\"]))\n self.assertIsNone(assert_array_equal(samples[i][\"image\"], expected[i][\"image\"]))\n\n @parameterized.expand(\n [\n TEST_CASE_OPENSLIDE_0,\n TEST_CASE_OPENSLIDE_1,\n ]\n )\n @skipUnless(has_osl, \"Requires OpenSlide\")\n def test_read_patches_openslide(self, input_parameters, expected):\n dataset = PatchWSIDataset(**input_parameters)\n samples = dataset[0]\n for i in range(len(samples)):\n self.assertTupleEqual(samples[i][\"label\"].shape, expected[i][\"label\"].shape)\n self.assertTupleEqual(samples[i][\"image\"].shape, expected[i][\"image\"].shape)\n self.assertIsNone(assert_array_equal(samples[i][\"label\"], expected[i][\"label\"]))\n self.assertIsNone(assert_array_equal(samples[i][\"image\"], expected[i][\"image\"]))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\n\nfrom monai.transforms import IntensityStatsd, ShiftIntensityd\nfrom tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose\n\n\nclass TestShiftIntensityd(NumpyImageTestCase2D):\n def test_value(self):\n key = \"img\"\n for p in TEST_NDARRAYS:\n shifter = ShiftIntensityd(keys=[key], offset=1.0)\n result = shifter({key: p(self.imt)})\n expected = self.imt + 1.0\n assert_allclose(result[key], expected)\n\n def test_factor(self):\n key = \"img\"\n stats = IntensityStatsd(keys=key, ops=\"max\", key_prefix=\"orig\")\n shifter = ShiftIntensityd(keys=[key], offset=1.0, factor_key=[\"orig_max\"])\n data = {key: self.imt, key + \"_meta_dict\": {\"affine\": None}}\n\n result = shifter(stats(data))\n expected = self.imt + 1.0 * np.nanmax(self.imt)\n np.testing.assert_allclose(result[key], expected)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest\n\nimport numpy as np\n\nfrom monai.transforms import Rotate90\nfrom tests.utils import TEST_NDARRAYS, NumpyImageTestCase2D, assert_allclose\n\n\nclass TestRotate90(NumpyImageTestCase2D):\n def test_rotate90_default(self):\n rotate = Rotate90()\n for p in TEST_NDARRAYS:\n rotated = rotate(p(self.imt[0]))\n expected = []\n for channel in self.imt[0]:\n expected.append(np.rot90(channel, 1, (0, 1)))\n expected = np.stack(expected)\n assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8)\n\n def test_k(self):\n rotate = Rotate90(k=2)\n for p in TEST_NDARRAYS:\n rotated = rotate(p(self.imt[0]))\n expected = []\n for channel in self.imt[0]:\n expected.append(np.rot90(channel, 2, (0, 1)))\n expected = np.stack(expected)\n assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8)\n\n def test_spatial_axes(self):\n rotate = Rotate90(spatial_axes=(0, -1))\n for p in TEST_NDARRAYS:\n rotated = rotate(p(self.imt[0]))\n expected = []\n for channel in self.imt[0]:\n expected.append(np.rot90(channel, 1, (0, -1)))\n expected = np.stack(expected)\n assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8)\n\n def test_prob_k_spatial_axes(self):\n rotate = Rotate90(k=2, spatial_axes=(0, 1))\n for p in TEST_NDARRAYS:\n rotated = rotate(p(self.imt[0]))\n expected = []\n for channel in self.imt[0]:\n expected.append(np.rot90(channel, 2, (0, 1)))\n expected = np.stack(expected)\n assert_allclose(rotated, expected, rtol=1.0e-5, atol=1.0e-8)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport unittest\nimport warnings\nfrom urllib.error import ContentTooShortError, HTTPError\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport monai\nfrom monai.apps import download_and_extract\nfrom monai.data import decollate_batch\nfrom monai.metrics import ROCAUCMetric\nfrom monai.networks import eval_mode\nfrom monai.networks.nets import DenseNet121\nfrom monai.transforms import (\n Activations,\n AddChannel,\n AsDiscrete,\n Compose,\n LoadImage,\n RandFlip,\n RandRotate,\n RandZoom,\n ScaleIntensity,\n ToTensor,\n Transpose,\n)\nfrom monai.utils import set_determinism\nfrom tests.testing_data.integration_answers import test_integration_value\nfrom tests.utils import DistTestCase, TimedCall, skip_if_quick\n\nTEST_DATA_URL = \"https://drive.google.com/uc?id=1QsnnkvZyJPcbRoV_ArW8SnE1OTuoVbKE\"\nMD5_VALUE = \"0bc7306e7427e00ad1c5526a6677552d\"\nTASK = \"integration_classification_2d\"\n\n\nclass MedNISTDataset(torch.utils.data.Dataset):\n def __init__(self, image_files, labels, transforms):\n self.image_files = image_files\n self.labels = labels\n self.transforms = transforms\n\n def __len__(self):\n return len(self.image_files)\n\n def __getitem__(self, index):\n return self.transforms(self.image_files[index]), self.labels[index]\n\n\ndef run_training_test(root_dir, train_x, train_y, val_x, val_y, device=\"cuda:0\", num_workers=10):\n\n monai.config.print_config()\n # define transforms for image and classification\n train_transforms = Compose(\n [\n LoadImage(image_only=True),\n AddChannel(),\n Transpose(indices=[0, 2, 1]),\n ScaleIntensity(),\n RandRotate(range_x=np.pi / 12, prob=0.5, keep_size=True),\n RandFlip(spatial_axis=0, prob=0.5),\n RandZoom(min_zoom=0.9, max_zoom=1.1, prob=0.5),\n ToTensor(),\n ]\n )\n train_transforms.set_random_state(1234)\n val_transforms = Compose(\n [LoadImage(image_only=True), AddChannel(), Transpose(indices=[0, 2, 1]), ScaleIntensity(), ToTensor()]\n )\n y_pred_trans = Compose([ToTensor(), Activations(softmax=True)])\n y_trans = Compose([ToTensor(), AsDiscrete(to_onehot=True, num_classes=len(np.unique(train_y)))])\n auc_metric = ROCAUCMetric()\n\n # create train, val data loaders\n train_ds = MedNISTDataset(train_x, train_y, train_transforms)\n train_loader = DataLoader(train_ds, batch_size=300, shuffle=True, num_workers=num_workers)\n\n val_ds = MedNISTDataset(val_x, val_y, val_transforms)\n val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)\n\n model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(train_y))).to(device)\n loss_function = torch.nn.CrossEntropyLoss()\n optimizer = torch.optim.Adam(model.parameters(), 1e-5)\n epoch_num = 4\n val_interval = 1\n\n # start training validation\n best_metric = -1\n best_metric_epoch = -1\n epoch_loss_values = []\n metric_values = []\n model_filename = os.path.join(root_dir, \"best_metric_model.pth\")\n for epoch in range(epoch_num):\n 
print(\"-\" * 10)\n print(f\"Epoch {epoch + 1}/{epoch_num}\")\n model.train()\n epoch_loss = 0\n step = 0\n for batch_data in train_loader:\n step += 1\n inputs, labels = batch_data[0].to(device), batch_data[1].to(device)\n optimizer.zero_grad()\n outputs = model(inputs)\n loss = loss_function(outputs, labels)\n loss.backward()\n optimizer.step()\n epoch_loss += loss.item()\n epoch_loss /= step\n epoch_loss_values.append(epoch_loss)\n print(f\"epoch {epoch + 1} average loss:{epoch_loss:0.4f}\")\n\n if (epoch + 1) % val_interval == 0:\n with eval_mode(model):\n y_pred = torch.tensor([], dtype=torch.float32, device=device)\n y = torch.tensor([], dtype=torch.long, device=device)\n for val_data in val_loader:\n val_images, val_labels = val_data[0].to(device), val_data[1].to(device)\n y_pred = torch.cat([y_pred, model(val_images)], dim=0)\n y = torch.cat([y, val_labels], dim=0)\n\n # compute accuracy\n acc_value = torch.eq(y_pred.argmax(dim=1), y)\n acc_metric = acc_value.sum().item() / len(acc_value)\n # decollate prediction and label and execute post processing\n y_pred = [y_pred_trans(i) for i in decollate_batch(y_pred)]\n y = [y_trans(i) for i in decollate_batch(y)]\n # compute AUC\n auc_metric(y_pred, y)\n auc_value = auc_metric.aggregate()\n auc_metric.reset()\n metric_values.append(auc_value)\n if auc_value > best_metric:\n best_metric = auc_value\n best_metric_epoch = epoch + 1\n torch.save(model.state_dict(), model_filename)\n print(\"saved new best metric model\")\n print(\n f\"current epoch {epoch +1} current AUC: {auc_value:0.4f} \"\n f\"current accuracy: {acc_metric:0.4f} best AUC: {best_metric:0.4f} at epoch {best_metric_epoch}\"\n )\n print(f\"train completed, best_metric: {best_metric:0.4f} at epoch: {best_metric_epoch}\")\n return epoch_loss_values, best_metric, best_metric_epoch\n\n\ndef run_inference_test(root_dir, test_x, test_y, device=\"cuda:0\", num_workers=10):\n # define transforms for image and classification\n val_transforms = Compose([LoadImage(image_only=True), AddChannel(), ScaleIntensity(), ToTensor()])\n val_ds = MedNISTDataset(test_x, test_y, val_transforms)\n val_loader = DataLoader(val_ds, batch_size=300, num_workers=num_workers)\n\n model = DenseNet121(spatial_dims=2, in_channels=1, out_channels=len(np.unique(test_y))).to(device)\n\n model_filename = os.path.join(root_dir, \"best_metric_model.pth\")\n model.load_state_dict(torch.load(model_filename))\n y_true = []\n y_pred = []\n with eval_mode(model):\n for test_data in val_loader:\n test_images, test_labels = test_data[0].to(device), test_data[1].to(device)\n pred = model(test_images).argmax(dim=1)\n for i in range(len(pred)):\n y_true.append(test_labels[i].item())\n y_pred.append(pred[i].item())\n tps = [np.sum((np.asarray(y_true) == idx) & (np.asarray(y_pred) == idx)) for idx in np.unique(test_y)]\n return tps\n\n\n@skip_if_quick\nclass IntegrationClassification2D(DistTestCase):\n def setUp(self):\n set_determinism(seed=0)\n self.data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"testing_data\")\n data_dir = os.path.join(self.data_dir, \"MedNIST\")\n dataset_file = os.path.join(self.data_dir, \"MedNIST.tar.gz\")\n\n if not os.path.exists(data_dir):\n try:\n download_and_extract(TEST_DATA_URL, dataset_file, self.data_dir, MD5_VALUE)\n except (ContentTooShortError, HTTPError, RuntimeError) as e:\n print(str(e))\n if isinstance(e, RuntimeError):\n # FIXME: skip MD5 check as current downloading method may fail\n self.assertTrue(str(e).startswith(\"md5 check\"))\n return # skipping this 
test due the network connection errors\n\n assert os.path.exists(data_dir)\n\n class_names = sorted((x for x in os.listdir(data_dir) if os.path.isdir(os.path.join(data_dir, x))))\n image_files = [\n [os.path.join(data_dir, class_name, x) for x in sorted(os.listdir(os.path.join(data_dir, class_name)))]\n for class_name in class_names\n ]\n image_file_list, image_classes = [], []\n for i, _ in enumerate(class_names):\n image_file_list.extend(image_files[i])\n image_classes.extend([i] * len(image_files[i]))\n\n # split train, val, test\n valid_frac, test_frac = 0.1, 0.1\n self.train_x, self.train_y = [], []\n self.val_x, self.val_y = [], []\n self.test_x, self.test_y = [], []\n for i in range(len(image_classes)):\n rann = np.random.random()\n if rann < valid_frac:\n self.val_x.append(image_file_list[i])\n self.val_y.append(image_classes[i])\n elif rann < test_frac + valid_frac:\n self.test_x.append(image_file_list[i])\n self.test_y.append(image_classes[i])\n else:\n self.train_x.append(image_file_list[i])\n self.train_y.append(image_classes[i])\n\n self.device = \"cuda:0\" if torch.cuda.is_available() else \"cpu:0\"\n\n def tearDown(self):\n set_determinism(seed=None)\n try:\n os.remove(os.path.join(self.data_dir, \"best_metric_model.pth\"))\n except FileNotFoundError:\n warnings.warn(\"not found best_metric_model.pth, training skipped?\")\n pass\n\n def train_and_infer(self, idx=0):\n results = []\n if not os.path.exists(os.path.join(self.data_dir, \"MedNIST\")):\n # skip test if no MedNIST dataset\n return results\n\n set_determinism(seed=0)\n losses, best_metric, best_metric_epoch = run_training_test(\n self.data_dir, self.train_x, self.train_y, self.val_x, self.val_y, device=self.device\n )\n infer_metric = run_inference_test(self.data_dir, self.test_x, self.test_y, device=self.device)\n\n print(f\"integration_classification_2d {losses}\")\n print(\"best metric\", best_metric)\n print(\"infer metric\", infer_metric)\n # check training properties\n self.assertTrue(test_integration_value(TASK, key=\"losses\", data=losses, rtol=1e-2))\n self.assertTrue(test_integration_value(TASK, key=\"best_metric\", data=best_metric, rtol=1e-4))\n np.testing.assert_allclose(best_metric_epoch, 4)\n model_file = os.path.join(self.data_dir, \"best_metric_model.pth\")\n self.assertTrue(os.path.exists(model_file))\n # check inference properties\n self.assertTrue(test_integration_value(TASK, key=\"infer_prop\", data=np.asarray(infer_metric), rtol=1))\n results.extend(losses)\n results.append(best_metric)\n results.extend(infer_metric)\n return results\n\n def test_training(self):\n repeated = []\n for i in range(2):\n results = self.train_and_infer(i)\n repeated.append(results)\n np.testing.assert_allclose(repeated[0], repeated[1])\n\n @TimedCall(seconds=1000, skip_timing=not torch.cuda.is_available(), daemon=False)\n def test_timing(self):\n self.train_and_infer()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n",
"# Copyright 2020 - 2021 MONAI Consortium\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport warnings\nfrom typing import TYPE_CHECKING, Callable, Dict, Iterable, List, Optional, Sequence, Union\n\nimport torch\nimport torch.distributed as dist\nfrom torch.utils.data import DataLoader\nfrom torch.utils.data.distributed import DistributedSampler\n\nfrom monai.config import IgniteInfo\nfrom monai.engines.utils import IterationEvents, default_metric_cmp_fn, default_prepare_batch\nfrom monai.transforms import Decollated, Transform\nfrom monai.utils import ensure_tuple, min_version, optional_import\n\nfrom .utils import engine_apply_transform\n\nIgniteEngine, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Engine\")\nState, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"State\")\nEvents, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Events\")\n\nif TYPE_CHECKING:\n from ignite.engine import Engine, EventEnum\n from ignite.metrics import Metric\nelse:\n Engine, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Engine\")\n Metric, _ = optional_import(\"ignite.metrics\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"Metric\")\n EventEnum, _ = optional_import(\"ignite.engine\", IgniteInfo.OPT_IMPORT_VERSION, min_version, \"EventEnum\")\n\n\nclass Workflow(IgniteEngine): # type: ignore[valid-type, misc] # due to optional_import\n \"\"\"\n Workflow defines the core work process inheriting from Ignite engine.\n All trainer, validator and evaluator share this same workflow as base class,\n because they all can be treated as same Ignite engine loops.\n It initializes all the sharable data in Ignite engine.state.\n And attach additional processing logics to Ignite engine based on Event-Handler mechanism.\n\n Users should consider inheriting from `trainer` or `evaluator` to develop more trainers or evaluators.\n\n Args:\n device: an object representing the device on which to run.\n max_epochs: the total epoch number for engine to run, validator and evaluator have only 1 epoch.\n data_loader: Ignite engine use data_loader to run, must be Iterable or torch.DataLoader.\n epoch_length: number of iterations for one epoch, default to `len(data_loader)`.\n non_blocking: if True and this copy is between CPU and GPU, the copy may occur asynchronously\n with respect to the host. For other cases, this argument has no effect.\n prepare_batch: function to parse image and label for every iteration.\n iteration_update: the callable function for every iteration, expect to accept `engine`\n and `batchdata` as input parameters. if not provided, use `self._iteration()` instead.\n postprocessing: execute additional transformation for the model output data.\n Typically, several Tensor based transforms composed by `Compose`.\n key_metric: compute metric when every iteration completed, and save average value to\n engine.state.metrics when epoch completed. 
key_metric is the main metric to compare and save the\n checkpoint into files.\n additional_metrics: more Ignite metrics that also attach to Ignite Engine.\n metric_cmp_fn: function to compare current key metric with previous best key metric value,\n it must accept 2 args (current_metric, previous_best) and return a bool result: if `True`, will update\n `best_metric` and `best_metric_epoch` with current metric and epoch, default to `greater than`.\n handlers: every handler is a set of Ignite Event-Handlers, must have `attach` function, like:\n CheckpointHandler, StatsHandler, SegmentationSaver, etc.\n amp: whether to enable auto-mixed-precision training or inference, default is False.\n event_names: additional custom ignite events that will register to the engine.\n new events can be a list of str or `ignite.engine.events.EventEnum`.\n event_to_attr: a dictionary to map an event to a state attribute, then add to `engine.state`.\n for more details, check: https://pytorch.org/ignite/generated/ignite.engine.engine.Engine.html\n #ignite.engine.engine.Engine.register_events.\n decollate: whether to decollate the batch-first data to a list of data after model computation,\n recommend `decollate=True` when `postprocessing` uses components from `monai.transforms`.\n default to `True`.\n\n Raises:\n TypeError: When ``device`` is not a ``torch.Device``.\n TypeError: When ``data_loader`` is not a ``torch.utils.data.DataLoader``.\n TypeError: When ``key_metric`` is not a ``Optional[dict]``.\n TypeError: When ``additional_metrics`` is not a ``Optional[dict]``.\n\n \"\"\"\n\n def __init__(\n self,\n device: torch.device,\n max_epochs: int,\n data_loader: Union[Iterable, DataLoader],\n epoch_length: Optional[int] = None,\n non_blocking: bool = False,\n prepare_batch: Callable = default_prepare_batch,\n iteration_update: Optional[Callable] = None,\n postprocessing: Optional[Callable] = None,\n key_metric: Optional[Dict[str, Metric]] = None,\n additional_metrics: Optional[Dict[str, Metric]] = None,\n metric_cmp_fn: Callable = default_metric_cmp_fn,\n handlers: Optional[Sequence] = None,\n amp: bool = False,\n event_names: Optional[List[Union[str, EventEnum]]] = None,\n event_to_attr: Optional[dict] = None,\n decollate: bool = True,\n ) -> None:\n if iteration_update is not None:\n super().__init__(iteration_update)\n else:\n super().__init__(self._iteration)\n if not isinstance(device, torch.device):\n raise TypeError(f\"device must be a torch.device but is {type(device).__name__}.\")\n\n if isinstance(data_loader, DataLoader):\n sampler = data_loader.__dict__[\"sampler\"]\n if isinstance(sampler, DistributedSampler):\n\n @self.on(Events.EPOCH_STARTED)\n def set_sampler_epoch(engine: Engine):\n sampler.set_epoch(engine.state.epoch)\n\n if epoch_length is None:\n epoch_length = len(data_loader)\n else:\n if epoch_length is None:\n raise ValueError(\"if data_loader is not PyTorch DataLoader, must specify the epoch_length.\")\n\n # set all sharable data for the workflow based on Ignite engine.state\n self.state = State(\n rank=dist.get_rank() if dist.is_available() and dist.is_initialized() else 0,\n seed=0,\n iteration=0,\n epoch=0,\n max_epochs=max_epochs,\n epoch_length=epoch_length,\n output=None,\n batch=None,\n metrics={},\n metric_details={},\n dataloader=None,\n device=device,\n key_metric_name=None, # we can set many metrics, only use key_metric to compare and save the best model\n best_metric=-1,\n best_metric_epoch=-1,\n )\n self.data_loader = data_loader\n self.non_blocking = non_blocking\n 
self.prepare_batch = prepare_batch\n self.metric_cmp_fn = metric_cmp_fn\n self.amp = amp\n self.scaler: Optional[torch.cuda.amp.GradScaler] = None\n\n if event_names is None:\n event_names = [IterationEvents]\n else:\n if not isinstance(event_names, list):\n raise ValueError(\"event_names must be a list or string or EventEnum.\")\n event_names += [IterationEvents]\n for name in event_names:\n if isinstance(name, str):\n self.register_events(name, event_to_attr=event_to_attr)\n elif issubclass(name, EventEnum):\n self.register_events(*name, event_to_attr=event_to_attr)\n else:\n raise ValueError(\"event_names must be a list or string or EventEnum.\")\n\n if decollate:\n self._register_decollate()\n\n if postprocessing is not None:\n if not decollate and isinstance(postprocessing, Transform):\n warnings.warn(\"MONAI transforms expect `channel-first` data, `decollate=False` may not work here.\")\n self._register_postprocessing(postprocessing)\n if key_metric is not None:\n self._register_metrics(key_metric, additional_metrics)\n if handlers is not None:\n self._register_handlers(handlers)\n\n def _register_decollate(self):\n \"\"\"\n Register the decollate operation for batch data, will execute after model forward and loss forward.\n\n \"\"\"\n\n @self.on(IterationEvents.MODEL_COMPLETED)\n def _decollate_data(engine: Engine) -> None:\n # replicate the scalar values to make sure all the items have batch dimension, then decollate\n transform = Decollated(keys=None, detach=True)\n engine.state.batch = transform(engine.state.batch)\n engine.state.output = transform(engine.state.output)\n\n def _register_postprocessing(self, posttrans: Callable):\n \"\"\"\n Register the postprocessing logic to the engine, will execute them as a chain when iteration completed.\n\n \"\"\"\n\n @self.on(IterationEvents.MODEL_COMPLETED)\n def _run_postprocessing(engine: Engine) -> None:\n if not isinstance(engine.state.batch, list) or not isinstance(engine.state.output, list):\n engine.state.batch, engine.state.output = engine_apply_transform(\n batch=engine.state.batch,\n output=engine.state.output,\n transform=posttrans,\n )\n else:\n for i, (b, o) in enumerate(zip(engine.state.batch, engine.state.output)):\n engine.state.batch[i], engine.state.output[i] = engine_apply_transform(b, o, posttrans)\n\n def _register_metrics(self, k_metric: Dict, add_metrics: Optional[Dict] = None):\n \"\"\"\n Register the key metric and additional metrics to the engine, supports ignite Metrics.\n\n \"\"\"\n if not isinstance(k_metric, dict):\n raise TypeError(f\"key_metric must be None or a dict but is {type(k_metric).__name__}.\")\n self.state.key_metric_name = list(k_metric.keys())[0]\n metrics = k_metric\n if add_metrics is not None and len(add_metrics) > 0:\n if not isinstance(add_metrics, dict):\n raise TypeError(f\"additional metrics must be None or a dict but is {type(add_metrics).__name__}.\")\n metrics.update(add_metrics)\n for name, metric in metrics.items():\n metric.attach(self, name)\n\n @self.on(Events.EPOCH_COMPLETED)\n def _compare_metrics(engine: Engine) -> None:\n if engine.state.key_metric_name is not None:\n current_val_metric = engine.state.metrics[engine.state.key_metric_name]\n if self.metric_cmp_fn(current_val_metric, engine.state.best_metric):\n self.logger.info(f\"Got new best metric of {engine.state.key_metric_name}: {current_val_metric}\")\n engine.state.best_metric = current_val_metric\n engine.state.best_metric_epoch = engine.state.epoch\n\n def _register_handlers(self, handlers: Sequence):\n \"\"\"\n 
Register the handlers to the engine, supports ignite Handlers with `attach` API.\n\n \"\"\"\n handlers_ = ensure_tuple(handlers)\n for handler in handlers_:\n handler.attach(self)\n\n def run(self) -> None:\n \"\"\"\n Execute training, validation or evaluation based on Ignite Engine.\n\n \"\"\"\n super().run(data=self.data_loader, max_epochs=self.state.max_epochs)\n\n def _iteration(self, engine: Engine, batchdata: Dict[str, torch.Tensor]):\n \"\"\"\n Abstract callback function for the processing logic of 1 iteration in Ignite Engine.\n Need subclass to implement different logics, like SupervisedTrainer/Evaluator, GANTrainer, etc.\n\n Args:\n engine: Ignite Engine, it can be a trainer, validator or evaluator.\n batchdata: input data for this iteration, usually can be dictionary or tuple of Tensor data.\n\n Raises:\n NotImplementedError: When the subclass does not override this method.\n\n \"\"\"\n raise NotImplementedError(f\"Subclass {self.__class__.__name__} must implement this method.\")\n"
] | [
[
"numpy.testing.assert_array_equal",
"numpy.array"
],
[
"numpy.nanmax",
"numpy.testing.assert_allclose"
],
[
"numpy.rot90",
"numpy.stack"
],
[
"torch.nn.CrossEntropyLoss",
"numpy.random.random",
"numpy.unique",
"torch.load",
"torch.cat",
"numpy.asarray",
"torch.utils.data.DataLoader",
"torch.tensor",
"torch.cuda.is_available",
"numpy.testing.assert_allclose"
],
[
"torch.distributed.get_rank",
"torch.distributed.is_available",
"torch.distributed.is_initialized"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
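
A recurring idiom in the MONAI files above: optional backends such as cuCIM, OpenSlide, and Ignite are probed with monai.utils.optional_import, and the returned flag gates tests through unittest.skipUnless, so a missing dependency produces a skip rather than an import error. A minimal sketch of that idiom (the test body is a placeholder):

```python
import unittest
from unittest import skipUnless

from monai.utils import optional_import

# optional_import returns (module_or_stub, found_flag) instead of raising,
# which lets the flag decide at collection time whether the test runs.
_, has_cim = optional_import("cucim")


class TestNeedsCucim(unittest.TestCase):
    @skipUnless(has_cim, "Requires cuCIM")
    def test_backend_specific_behaviour(self):
        # Only runs when cuCIM is importable; otherwise reported as skipped.
        self.assertTrue(has_cim)


if __name__ == "__main__":
    unittest.main()
```
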
Mirkazemi/openml-python | [
"4ff66ed284790e4ae29245a15e23a3fa1f1c3a6b",
"4ff66ed284790e4ae29245a15e23a3fa1f1c3a6b"
] | [
"openml/tasks/functions.py",
"openml/runs/run.py"
] | [
"# License: BSD 3-Clause\n\nfrom collections import OrderedDict\nimport io\nimport re\nimport os\nfrom typing import Union, Dict, Optional\n\nimport pandas as pd\nimport xmltodict\n\nfrom ..exceptions import OpenMLCacheException\nfrom ..datasets import get_dataset\nfrom .task import (\n OpenMLClassificationTask,\n OpenMLClusteringTask,\n OpenMLLearningCurveTask,\n TaskType,\n OpenMLRegressionTask,\n OpenMLSupervisedTask,\n OpenMLTask,\n)\nimport openml.utils\nimport openml._api_calls\n\n\nTASKS_CACHE_DIR_NAME = \"tasks\"\n\n\ndef _get_cached_tasks():\n \"\"\"Return a dict of all the tasks which are cached locally.\n Returns\n -------\n tasks : OrderedDict\n A dict of all the cached tasks. Each task is an instance of\n OpenMLTask.\n \"\"\"\n tasks = OrderedDict()\n\n task_cache_dir = openml.utils._create_cache_directory(TASKS_CACHE_DIR_NAME)\n directory_content = os.listdir(task_cache_dir)\n directory_content.sort()\n # Find all dataset ids for which we have downloaded the dataset\n # description\n\n for filename in directory_content:\n if not re.match(r\"[0-9]*\", filename):\n continue\n\n tid = int(filename)\n tasks[tid] = _get_cached_task(tid)\n\n return tasks\n\n\ndef _get_cached_task(tid: int) -> OpenMLTask:\n \"\"\"Return a cached task based on the given id.\n\n Parameters\n ----------\n tid : int\n Id of the task.\n\n Returns\n -------\n OpenMLTask\n \"\"\"\n tid_cache_dir = openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, tid)\n\n try:\n with io.open(os.path.join(tid_cache_dir, \"task.xml\"), encoding=\"utf8\") as fh:\n return _create_task_from_xml(fh.read())\n except (OSError, IOError):\n openml.utils._remove_cache_dir_for_id(TASKS_CACHE_DIR_NAME, tid_cache_dir)\n raise OpenMLCacheException(\"Task file for tid %d not \" \"cached\" % tid)\n\n\ndef _get_estimation_procedure_list():\n \"\"\"Return a list of all estimation procedures which are on OpenML.\n Returns\n -------\n procedures : list\n A list of all estimation procedures. 
Every procedure is represented by\n a dictionary containing the following information: id, task type id,\n name, type, repeats, folds, stratified.\n \"\"\"\n url_suffix = \"estimationprocedure/list\"\n xml_string = openml._api_calls._perform_api_call(url_suffix, \"get\")\n\n procs_dict = xmltodict.parse(xml_string)\n # Minimalistic check if the XML is useful\n if \"oml:estimationprocedures\" not in procs_dict:\n raise ValueError(\"Error in return XML, does not contain tag \" \"oml:estimationprocedures.\")\n elif \"@xmlns:oml\" not in procs_dict[\"oml:estimationprocedures\"]:\n raise ValueError(\n \"Error in return XML, does not contain tag \"\n \"@xmlns:oml as a child of oml:estimationprocedures.\"\n )\n elif procs_dict[\"oml:estimationprocedures\"][\"@xmlns:oml\"] != \"http://openml.org/openml\":\n raise ValueError(\n \"Error in return XML, value of \"\n \"oml:estimationprocedures/@xmlns:oml is not \"\n \"http://openml.org/openml, but %s\"\n % str(procs_dict[\"oml:estimationprocedures\"][\"@xmlns:oml\"])\n )\n\n procs = []\n for proc_ in procs_dict[\"oml:estimationprocedures\"][\"oml:estimationprocedure\"]:\n procs.append(\n {\n \"id\": int(proc_[\"oml:id\"]),\n \"task_type_id\": TaskType(int(proc_[\"oml:ttid\"])),\n \"name\": proc_[\"oml:name\"],\n \"type\": proc_[\"oml:type\"],\n }\n )\n\n return procs\n\n\ndef list_tasks(\n task_type: Optional[TaskType] = None,\n offset: Optional[int] = None,\n size: Optional[int] = None,\n tag: Optional[str] = None,\n output_format: str = \"dict\",\n **kwargs\n) -> Union[Dict, pd.DataFrame]:\n \"\"\"\n Return a number of tasks having the given tag and task_type\n\n Parameters\n ----------\n Filter task_type is separated from the other filters because\n it is used as task_type in the task description, but it is named\n type when used as a filter in list tasks call.\n task_type : TaskType, optional\n ID of the task type as detailed `here <https://www.openml.org/search?type=task_type>`_.\n - Supervised classification: 1\n - Supervised regression: 2\n - Learning curve: 3\n - Supervised data stream classification: 4\n - Clustering: 5\n - Machine Learning Challenge: 6\n - Survival Analysis: 7\n - Subgroup Discovery: 8\n offset : int, optional\n the number of tasks to skip, starting from the first\n size : int, optional\n the maximum number of tasks to show\n tag : str, optional\n the tag to include\n output_format: str, optional (default='dict')\n The parameter decides the format of the output.\n - If 'dict' the output is a dict of dict\n - If 'dataframe' the output is a pandas DataFrame\n kwargs: dict, optional\n Legal filter operators: data_tag, status, data_id, data_name,\n number_instances, number_features,\n number_classes, number_missing_values.\n\n Returns\n -------\n dict\n All tasks having the given task_type and the give tag. Every task is\n represented by a dictionary containing the following information:\n task id, dataset id, task_type and status. If qualities are calculated\n for the associated dataset, some of these are also returned.\n dataframe\n All tasks having the given task_type and the give tag. Every task is\n represented by a row in the data frame containing the following information\n as columns: task id, dataset id, task_type and status. If qualities are\n calculated for the associated dataset, some of these are also returned.\n \"\"\"\n if output_format not in [\"dataframe\", \"dict\"]:\n raise ValueError(\n \"Invalid output format selected. 
\" \"Only 'dict' or 'dataframe' applicable.\"\n )\n return openml.utils._list_all(\n output_format=output_format,\n listing_call=_list_tasks,\n task_type=task_type,\n offset=offset,\n size=size,\n tag=tag,\n **kwargs\n )\n\n\ndef _list_tasks(task_type=None, output_format=\"dict\", **kwargs):\n \"\"\"\n Perform the api call to return a number of tasks having the given filters.\n Parameters\n ----------\n Filter task_type is separated from the other filters because\n it is used as task_type in the task description, but it is named\n type when used as a filter in list tasks call.\n task_type : TaskType, optional\n ID of the task type as detailed\n `here <https://www.openml.org/search?type=task_type>`_.\n - Supervised classification: 1\n - Supervised regression: 2\n - Learning curve: 3\n - Supervised data stream classification: 4\n - Clustering: 5\n - Machine Learning Challenge: 6\n - Survival Analysis: 7\n - Subgroup Discovery: 8\n output_format: str, optional (default='dict')\n The parameter decides the format of the output.\n - If 'dict' the output is a dict of dict\n - If 'dataframe' the output is a pandas DataFrame\n kwargs: dict, optional\n Legal filter operators: tag, task_id (list), data_tag, status, limit,\n offset, data_id, data_name, number_instances, number_features,\n number_classes, number_missing_values.\n\n Returns\n -------\n dict or dataframe\n \"\"\"\n api_call = \"task/list\"\n if task_type is not None:\n api_call += \"/type/%d\" % task_type.value\n if kwargs is not None:\n for operator, value in kwargs.items():\n if operator == \"task_id\":\n value = \",\".join([str(int(i)) for i in value])\n api_call += \"/%s/%s\" % (operator, value)\n return __list_tasks(api_call=api_call, output_format=output_format)\n\n\ndef __list_tasks(api_call, output_format=\"dict\"):\n xml_string = openml._api_calls._perform_api_call(api_call, \"get\")\n tasks_dict = xmltodict.parse(xml_string, force_list=(\"oml:task\", \"oml:input\"))\n # Minimalistic check if the XML is useful\n if \"oml:tasks\" not in tasks_dict:\n raise ValueError('Error in return XML, does not contain \"oml:runs\": %s' % str(tasks_dict))\n elif \"@xmlns:oml\" not in tasks_dict[\"oml:tasks\"]:\n raise ValueError(\n \"Error in return XML, does not contain \" '\"oml:runs\"/@xmlns:oml: %s' % str(tasks_dict)\n )\n elif tasks_dict[\"oml:tasks\"][\"@xmlns:oml\"] != \"http://openml.org/openml\":\n raise ValueError(\n \"Error in return XML, value of \"\n '\"oml:runs\"/@xmlns:oml is not '\n '\"http://openml.org/openml\": %s' % str(tasks_dict)\n )\n\n assert type(tasks_dict[\"oml:tasks\"][\"oml:task\"]) == list, type(tasks_dict[\"oml:tasks\"])\n\n tasks = dict()\n procs = _get_estimation_procedure_list()\n proc_dict = dict((x[\"id\"], x) for x in procs)\n\n for task_ in tasks_dict[\"oml:tasks\"][\"oml:task\"]:\n tid = None\n try:\n tid = int(task_[\"oml:task_id\"])\n task = {\n \"tid\": tid,\n \"ttid\": TaskType(int(task_[\"oml:task_type_id\"])),\n \"did\": int(task_[\"oml:did\"]),\n \"name\": task_[\"oml:name\"],\n \"task_type\": task_[\"oml:task_type\"],\n \"status\": task_[\"oml:status\"],\n }\n\n # Other task inputs\n for input in task_.get(\"oml:input\", list()):\n if input[\"@name\"] == \"estimation_procedure\":\n task[input[\"@name\"]] = proc_dict[int(input[\"#text\"])][\"name\"]\n else:\n value = input.get(\"#text\")\n task[input[\"@name\"]] = value\n\n # The number of qualities can range from 0 to infinity\n for quality in task_.get(\"oml:quality\", list()):\n if \"#text\" not in quality:\n quality_value = 0.0\n else:\n 
quality[\"#text\"] = float(quality[\"#text\"])\n if abs(int(quality[\"#text\"]) - quality[\"#text\"]) < 0.0000001:\n quality[\"#text\"] = int(quality[\"#text\"])\n quality_value = quality[\"#text\"]\n task[quality[\"@name\"]] = quality_value\n tasks[tid] = task\n except KeyError as e:\n if tid is not None:\n raise KeyError(\"Invalid xml for task %d: %s\\nFrom %s\" % (tid, e, task_))\n else:\n raise KeyError(\"Could not find key %s in %s!\" % (e, task_))\n\n if output_format == \"dataframe\":\n tasks = pd.DataFrame.from_dict(tasks, orient=\"index\")\n\n return tasks\n\n\ndef get_tasks(task_ids, download_data=True):\n \"\"\"Download tasks.\n\n This function iterates :meth:`openml.tasks.get_task`.\n\n Parameters\n ----------\n task_ids : iterable\n Integers/Strings representing task ids.\n download_data : bool\n Option to trigger download of data along with the meta data.\n\n Returns\n -------\n list\n \"\"\"\n tasks = []\n for task_id in task_ids:\n tasks.append(get_task(task_id, download_data))\n return tasks\n\n\[email protected]_safe_if_oslo_installed\ndef get_task(task_id: int, download_data: bool = True) -> OpenMLTask:\n \"\"\"Download OpenML task for a given task ID.\n\n Downloads the task representation, while the data splits can be\n downloaded optionally based on the additional parameter. Else,\n splits will either way be downloaded when the task is being used.\n\n Parameters\n ----------\n task_id : int or str\n The OpenML task id.\n download_data : bool\n Option to trigger download of data along with the meta data.\n\n Returns\n -------\n task\n \"\"\"\n try:\n task_id = int(task_id)\n except (ValueError, TypeError):\n raise ValueError(\"Dataset ID is neither an Integer nor can be \" \"cast to an Integer.\")\n\n tid_cache_dir = openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, task_id,)\n\n try:\n task = _get_task_description(task_id)\n dataset = get_dataset(task.dataset_id, download_data)\n # List of class labels availaible in dataset description\n # Including class labels as part of task meta data handles\n # the case where data download was initially disabled\n if isinstance(task, (OpenMLClassificationTask, OpenMLLearningCurveTask)):\n task.class_labels = dataset.retrieve_class_labels(task.target_name)\n # Clustering tasks do not have class labels\n # and do not offer download_split\n if download_data:\n if isinstance(task, OpenMLSupervisedTask):\n task.download_split()\n except Exception as e:\n openml.utils._remove_cache_dir_for_id(\n TASKS_CACHE_DIR_NAME, tid_cache_dir,\n )\n raise e\n\n return task\n\n\ndef _get_task_description(task_id):\n\n try:\n return _get_cached_task(task_id)\n except OpenMLCacheException:\n xml_file = os.path.join(\n openml.utils._create_cache_directory_for_id(TASKS_CACHE_DIR_NAME, task_id,), \"task.xml\",\n )\n task_xml = openml._api_calls._perform_api_call(\"task/%d\" % task_id, \"get\")\n\n with io.open(xml_file, \"w\", encoding=\"utf8\") as fh:\n fh.write(task_xml)\n return _create_task_from_xml(task_xml)\n\n\ndef _create_task_from_xml(xml):\n \"\"\"Create a task given a xml string.\n\n Parameters\n ----------\n xml : string\n Task xml representation.\n\n Returns\n -------\n OpenMLTask\n \"\"\"\n dic = xmltodict.parse(xml)[\"oml:task\"]\n estimation_parameters = dict()\n inputs = dict()\n # Due to the unordered structure we obtain, we first have to extract\n # the possible keys of oml:input; dic[\"oml:input\"] is a list of\n # OrderedDicts\n\n # Check if there is a list of inputs\n if isinstance(dic[\"oml:input\"], list):\n for 
input_ in dic[\"oml:input\"]:\n name = input_[\"@name\"]\n inputs[name] = input_\n # Single input case\n elif isinstance(dic[\"oml:input\"], dict):\n name = dic[\"oml:input\"][\"@name\"]\n inputs[name] = dic[\"oml:input\"]\n\n evaluation_measures = None\n if \"evaluation_measures\" in inputs:\n evaluation_measures = inputs[\"evaluation_measures\"][\"oml:evaluation_measures\"][\n \"oml:evaluation_measure\"\n ]\n\n task_type = TaskType(int(dic[\"oml:task_type_id\"]))\n common_kwargs = {\n \"task_id\": dic[\"oml:task_id\"],\n \"task_type\": dic[\"oml:task_type\"],\n \"task_type_id\": task_type,\n \"data_set_id\": inputs[\"source_data\"][\"oml:data_set\"][\"oml:data_set_id\"],\n \"evaluation_measure\": evaluation_measures,\n }\n if task_type in (\n TaskType.SUPERVISED_CLASSIFICATION,\n TaskType.SUPERVISED_REGRESSION,\n TaskType.LEARNING_CURVE,\n ):\n # Convert some more parameters\n for parameter in inputs[\"estimation_procedure\"][\"oml:estimation_procedure\"][\n \"oml:parameter\"\n ]:\n name = parameter[\"@name\"]\n text = parameter.get(\"#text\", \"\")\n estimation_parameters[name] = text\n\n common_kwargs[\"estimation_procedure_type\"] = inputs[\"estimation_procedure\"][\n \"oml:estimation_procedure\"\n ][\"oml:type\"]\n common_kwargs[\"estimation_parameters\"] = estimation_parameters\n common_kwargs[\"target_name\"] = inputs[\"source_data\"][\"oml:data_set\"][\"oml:target_feature\"]\n common_kwargs[\"data_splits_url\"] = inputs[\"estimation_procedure\"][\n \"oml:estimation_procedure\"\n ][\"oml:data_splits_url\"]\n\n cls = {\n TaskType.SUPERVISED_CLASSIFICATION: OpenMLClassificationTask,\n TaskType.SUPERVISED_REGRESSION: OpenMLRegressionTask,\n TaskType.CLUSTERING: OpenMLClusteringTask,\n TaskType.LEARNING_CURVE: OpenMLLearningCurveTask,\n }.get(task_type)\n if cls is None:\n raise NotImplementedError(\"Task type %s not supported.\" % common_kwargs[\"task_type\"])\n return cls(**common_kwargs)\n\n\ndef create_task(\n task_type: TaskType,\n dataset_id: int,\n estimation_procedure_id: int,\n target_name: Optional[str] = None,\n evaluation_measure: Optional[str] = None,\n **kwargs\n) -> Union[\n OpenMLClassificationTask, OpenMLRegressionTask, OpenMLLearningCurveTask, OpenMLClusteringTask\n]:\n \"\"\"Create a task based on different given attributes.\n\n Builds a task object with the function arguments as\n attributes. 
The type of the task object built is\n determined from the task type id.\n More information on how the arguments (task attributes),\n relate to the different possible tasks can be found in\n the individual task objects at the openml.tasks.task\n module.\n\n Parameters\n ----------\n task_type : TaskType\n Id of the task type.\n dataset_id : int\n The id of the dataset for the task.\n target_name : str, optional\n The name of the feature used as a target.\n At the moment, only optional for the clustering tasks.\n estimation_procedure_id : int\n The id of the estimation procedure.\n evaluation_measure : str, optional\n The name of the evaluation measure.\n kwargs : dict, optional\n Other task attributes that are not mandatory\n for task upload.\n\n Returns\n -------\n OpenMLClassificationTask, OpenMLRegressionTask,\n OpenMLLearningCurveTask, OpenMLClusteringTask\n \"\"\"\n task_cls = {\n TaskType.SUPERVISED_CLASSIFICATION: OpenMLClassificationTask,\n TaskType.SUPERVISED_REGRESSION: OpenMLRegressionTask,\n TaskType.CLUSTERING: OpenMLClusteringTask,\n TaskType.LEARNING_CURVE: OpenMLLearningCurveTask,\n }.get(task_type)\n\n if task_cls is None:\n raise NotImplementedError(\"Task type {0:d} not supported.\".format(task_type))\n else:\n return task_cls(\n task_type_id=task_type,\n task_type=None,\n data_set_id=dataset_id,\n target_name=target_name,\n estimation_procedure_id=estimation_procedure_id,\n evaluation_measure=evaluation_measure,\n **kwargs\n )\n",
"# License: BSD 3-Clause\n\nfrom collections import OrderedDict\nimport pickle\nimport time\nfrom typing import Any, IO, TextIO, List, Union, Tuple, Optional, Dict # noqa F401\nimport os\n\nimport arff\nimport numpy as np\n\nimport openml\nimport openml._api_calls\nfrom openml.base import OpenMLBase\nfrom ..exceptions import PyOpenMLError\nfrom ..flows import get_flow\nfrom ..tasks import (\n get_task,\n TaskType,\n OpenMLClassificationTask,\n OpenMLLearningCurveTask,\n OpenMLClusteringTask,\n OpenMLRegressionTask,\n)\n\n\nclass OpenMLRun(OpenMLBase):\n \"\"\"OpenML Run: result of running a model on an openml dataset.\n\n Parameters\n ----------\n task_id: int\n flow_id: int\n dataset_id: int\n setup_string: str\n output_files: Dict[str, str]\n A dictionary that specifies where each related file can be found.\n setup_id: int\n tags: List[str]\n uploader: int\n User ID of the uploader.\n uploader_name: str\n evaluations: Dict\n fold_evaluations: Dict\n sample_evaluations: Dict\n data_content: List[List]\n The predictions generated from executing this run.\n trace: OpenMLRunTrace\n model: object\n task_type: str\n task_evaluation_measure: str\n flow_name: str\n parameter_settings: List[OrderedDict]\n predictions_url: str\n task: OpenMLTask\n flow: OpenMLFlow\n run_id: int\n description_text: str, optional\n Description text to add to the predictions file.\n If left None,\n \"\"\"\n\n def __init__(\n self,\n task_id,\n flow_id,\n dataset_id,\n setup_string=None,\n output_files=None,\n setup_id=None,\n tags=None,\n uploader=None,\n uploader_name=None,\n evaluations=None,\n fold_evaluations=None,\n sample_evaluations=None,\n data_content=None,\n trace=None,\n model=None,\n task_type=None,\n task_evaluation_measure=None,\n flow_name=None,\n parameter_settings=None,\n predictions_url=None,\n task=None,\n flow=None,\n run_id=None,\n description_text=None,\n ):\n self.uploader = uploader\n self.uploader_name = uploader_name\n self.task_id = task_id\n self.task_type = task_type\n self.task_evaluation_measure = task_evaluation_measure\n self.flow_id = flow_id\n self.flow_name = flow_name\n self.setup_id = setup_id\n self.setup_string = setup_string\n self.parameter_settings = parameter_settings\n self.dataset_id = dataset_id\n self.evaluations = evaluations\n self.fold_evaluations = fold_evaluations\n self.sample_evaluations = sample_evaluations\n self.data_content = data_content\n self.output_files = output_files\n self.trace = trace\n self.error_message = None\n self.task = task\n self.flow = flow\n self.run_id = run_id\n self.model = model\n self.tags = tags\n self.predictions_url = predictions_url\n self.description_text = description_text\n\n @property\n def id(self) -> Optional[int]:\n return self.run_id\n\n def _get_repr_body_fields(self) -> List[Tuple[str, Union[str, int, List[str]]]]:\n \"\"\" Collect all information to display in the __repr__ body. 
\"\"\"\n fields = {\n \"Uploader Name\": self.uploader_name,\n \"Metric\": self.task_evaluation_measure,\n \"Run ID\": self.run_id,\n \"Task ID\": self.task_id,\n \"Task Type\": self.task_type,\n \"Task URL\": openml.tasks.OpenMLTask.url_for_id(self.task_id),\n \"Flow ID\": self.flow_id,\n \"Flow Name\": self.flow_name,\n \"Flow URL\": openml.flows.OpenMLFlow.url_for_id(self.flow_id),\n \"Setup ID\": self.setup_id,\n \"Setup String\": self.setup_string,\n \"Dataset ID\": self.dataset_id,\n \"Dataset URL\": openml.datasets.OpenMLDataset.url_for_id(self.dataset_id),\n }\n if self.uploader is not None:\n fields[\"Uploader Profile\"] = \"{}/u/{}\".format(\n openml.config.get_server_base_url(), self.uploader\n )\n if self.run_id is not None:\n fields[\"Run URL\"] = self.openml_url\n if self.evaluations is not None and self.task_evaluation_measure in self.evaluations:\n fields[\"Result\"] = self.evaluations[self.task_evaluation_measure]\n\n # determines the order in which the information will be printed\n order = [\n \"Uploader Name\",\n \"Uploader Profile\",\n \"Metric\",\n \"Result\",\n \"Run ID\",\n \"Run URL\",\n \"Task ID\",\n \"Task Type\",\n \"Task URL\",\n \"Flow ID\",\n \"Flow Name\",\n \"Flow URL\",\n \"Setup ID\",\n \"Setup String\",\n \"Dataset ID\",\n \"Dataset URL\",\n ]\n return [(key, fields[key]) for key in order if key in fields]\n\n @classmethod\n def from_filesystem(cls, directory: str, expect_model: bool = True) -> \"OpenMLRun\":\n \"\"\"\n The inverse of the to_filesystem method. Instantiates an OpenMLRun\n object based on files stored on the file system.\n\n Parameters\n ----------\n directory : str\n a path leading to the folder where the results\n are stored\n\n expect_model : bool\n if True, it requires the model pickle to be present, and an error\n will be thrown if not. Otherwise, the model might or might not\n be present.\n\n Returns\n -------\n run : OpenMLRun\n the re-instantiated run object\n \"\"\"\n\n # Avoiding cyclic imports\n import openml.runs.functions\n\n if not os.path.isdir(directory):\n raise ValueError(\"Could not find folder\")\n\n description_path = os.path.join(directory, \"description.xml\")\n predictions_path = os.path.join(directory, \"predictions.arff\")\n trace_path = os.path.join(directory, \"trace.arff\")\n model_path = os.path.join(directory, \"model.pkl\")\n\n if not os.path.isfile(description_path):\n raise ValueError(\"Could not find description.xml\")\n if not os.path.isfile(predictions_path):\n raise ValueError(\"Could not find predictions.arff\")\n if not os.path.isfile(model_path) and expect_model:\n raise ValueError(\"Could not find model.pkl\")\n\n with open(description_path, \"r\") as fht:\n xml_string = fht.read()\n run = openml.runs.functions._create_run_from_xml(xml_string, from_server=False)\n\n if run.flow_id is None:\n flow = openml.flows.OpenMLFlow.from_filesystem(directory)\n run.flow = flow\n run.flow_name = flow.name\n\n with open(predictions_path, \"r\") as fht:\n predictions = arff.load(fht)\n run.data_content = predictions[\"data\"]\n\n if os.path.isfile(model_path):\n # note that it will load the model if the file exists, even if\n # expect_model is False\n with open(model_path, \"rb\") as fhb:\n run.model = pickle.load(fhb)\n\n if os.path.isfile(trace_path):\n run.trace = openml.runs.OpenMLRunTrace._from_filesystem(trace_path)\n\n return run\n\n def to_filesystem(self, directory: str, store_model: bool = True,) -> None:\n \"\"\"\n The inverse of the from_filesystem method. 
Serializes a run\n on the filesystem, to be uploaded later.\n\n Parameters\n ----------\n directory : str\n a path leading to the folder where the results\n will be stored. Should be empty\n\n store_model : bool, optional (default=True)\n if True, a model will be pickled as well. As this is the most\n storage expensive part, it is often desirable to not store the\n model.\n \"\"\"\n if self.data_content is None or self.model is None:\n raise ValueError(\"Run should have been executed (and contain \" \"model / predictions)\")\n\n os.makedirs(directory, exist_ok=True)\n if not os.listdir(directory) == []:\n raise ValueError(\n \"Output directory {} should be empty\".format(os.path.abspath(directory))\n )\n\n run_xml = self._to_xml()\n predictions_arff = arff.dumps(self._generate_arff_dict())\n\n # It seems like typing does not allow to define the same variable multiple times\n with open(os.path.join(directory, \"description.xml\"), \"w\") as fh: # type: TextIO\n fh.write(run_xml)\n with open(os.path.join(directory, \"predictions.arff\"), \"w\") as fh:\n fh.write(predictions_arff)\n if store_model:\n with open(os.path.join(directory, \"model.pkl\"), \"wb\") as fh_b: # type: IO[bytes]\n pickle.dump(self.model, fh_b)\n\n if self.flow_id is None:\n self.flow.to_filesystem(directory)\n\n if self.trace is not None:\n self.trace._to_filesystem(directory)\n\n def _generate_arff_dict(self) -> \"OrderedDict[str, Any]\":\n \"\"\"Generates the arff dictionary for uploading predictions to the\n server.\n\n Assumes that the run has been executed.\n\n Returns\n -------\n arf_dict : dict\n Dictionary representation of the ARFF file that will be uploaded.\n Contains predictions and information about the run environment.\n \"\"\"\n if self.data_content is None:\n raise ValueError(\"Run has not been executed.\")\n if self.flow is None:\n self.flow = get_flow(self.flow_id)\n\n if self.description_text is None:\n self.description_text = time.strftime(\"%c\")\n task = get_task(self.task_id)\n\n arff_dict = OrderedDict() # type: 'OrderedDict[str, Any]'\n arff_dict[\"data\"] = self.data_content\n arff_dict[\"description\"] = self.description_text\n arff_dict[\"relation\"] = \"openml_task_{}_predictions\".format(task.task_id)\n\n if isinstance(task, OpenMLLearningCurveTask):\n class_labels = task.class_labels\n instance_specifications = [\n (\"repeat\", \"NUMERIC\"),\n (\"fold\", \"NUMERIC\"),\n (\"sample\", \"NUMERIC\"),\n (\"row_id\", \"NUMERIC\"),\n ]\n\n arff_dict[\"attributes\"] = instance_specifications\n if class_labels is not None:\n arff_dict[\"attributes\"] = (\n arff_dict[\"attributes\"]\n + [\n (\"confidence.\" + class_labels[i], \"NUMERIC\")\n for i in range(len(class_labels))\n ]\n + [(\"prediction\", class_labels), (\"correct\", class_labels)]\n )\n else:\n raise ValueError(\"The task has no class labels\")\n\n elif isinstance(task, OpenMLClassificationTask):\n class_labels = task.class_labels\n instance_specifications = [\n (\"repeat\", \"NUMERIC\"),\n (\"fold\", \"NUMERIC\"),\n (\"sample\", \"NUMERIC\"), # Legacy\n (\"row_id\", \"NUMERIC\"),\n ]\n\n arff_dict[\"attributes\"] = instance_specifications\n if class_labels is not None:\n prediction_confidences = [\n (\"confidence.\" + class_labels[i], \"NUMERIC\") for i in range(len(class_labels))\n ]\n prediction_and_true = [(\"prediction\", class_labels), (\"correct\", class_labels)]\n arff_dict[\"attributes\"] = (\n arff_dict[\"attributes\"] + prediction_confidences + prediction_and_true\n )\n else:\n raise ValueError(\"The task has no class 
labels\")\n\n elif isinstance(task, OpenMLRegressionTask):\n arff_dict[\"attributes\"] = [\n (\"repeat\", \"NUMERIC\"),\n (\"fold\", \"NUMERIC\"),\n (\"row_id\", \"NUMERIC\"),\n (\"prediction\", \"NUMERIC\"),\n (\"truth\", \"NUMERIC\"),\n ]\n\n elif isinstance(task, OpenMLClusteringTask):\n arff_dict[\"attributes\"] = [\n (\"repeat\", \"NUMERIC\"),\n (\"fold\", \"NUMERIC\"),\n (\"row_id\", \"NUMERIC\"),\n (\"cluster\", \"NUMERIC\"),\n ]\n\n else:\n raise NotImplementedError(\"Task type %s is not yet supported.\" % str(task.task_type))\n\n return arff_dict\n\n def get_metric_fn(self, sklearn_fn, kwargs=None):\n \"\"\"Calculates metric scores based on predicted values. Assumes the\n run has been executed locally (and contains run_data). Furthermore,\n it assumes that the 'correct' or 'truth' attribute is specified in\n the arff (which is an optional field, but always the case for\n openml-python runs)\n\n Parameters\n ----------\n sklearn_fn : function\n a function pointer to a sklearn function that\n accepts ``y_true``, ``y_pred`` and ``**kwargs``\n\n Returns\n -------\n scores : list\n a list of floats, of length num_folds * num_repeats\n \"\"\"\n kwargs = kwargs if kwargs else dict()\n if self.data_content is not None and self.task_id is not None:\n predictions_arff = self._generate_arff_dict()\n elif \"predictions\" in self.output_files:\n predictions_file_url = openml._api_calls._file_id_to_url(\n self.output_files[\"predictions\"], \"predictions.arff\",\n )\n response = openml._api_calls._download_text_file(predictions_file_url)\n predictions_arff = arff.loads(response)\n # TODO: make this a stream reader\n else:\n raise ValueError(\n \"Run should have been locally executed or \" \"contain outputfile reference.\"\n )\n\n # Need to know more about the task to compute scores correctly\n task = get_task(self.task_id)\n\n attribute_names = [att[0] for att in predictions_arff[\"attributes\"]]\n if (\n task.task_type_id in [TaskType.SUPERVISED_CLASSIFICATION, TaskType.LEARNING_CURVE]\n and \"correct\" not in attribute_names\n ):\n raise ValueError('Attribute \"correct\" should be set for ' \"classification task runs\")\n if task.task_type_id == TaskType.SUPERVISED_REGRESSION and \"truth\" not in attribute_names:\n raise ValueError('Attribute \"truth\" should be set for ' \"regression task runs\")\n if task.task_type_id != TaskType.CLUSTERING and \"prediction\" not in attribute_names:\n raise ValueError('Attribute \"predict\" should be set for ' \"supervised task runs\")\n\n def _attribute_list_to_dict(attribute_list):\n # convenience function: Creates a mapping to map from the name of\n # attributes present in the arff prediction file to their index.\n # This is necessary because the number of classes can be different\n # for different tasks.\n res = OrderedDict()\n for idx in range(len(attribute_list)):\n res[attribute_list[idx][0]] = idx\n return res\n\n attribute_dict = _attribute_list_to_dict(predictions_arff[\"attributes\"])\n\n repeat_idx = attribute_dict[\"repeat\"]\n fold_idx = attribute_dict[\"fold\"]\n predicted_idx = attribute_dict[\"prediction\"] # Assume supervised task\n\n if (\n task.task_type_id == TaskType.SUPERVISED_CLASSIFICATION\n or task.task_type_id == TaskType.LEARNING_CURVE\n ):\n correct_idx = attribute_dict[\"correct\"]\n elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:\n correct_idx = attribute_dict[\"truth\"]\n has_samples = False\n if \"sample\" in attribute_dict:\n sample_idx = attribute_dict[\"sample\"]\n has_samples = True\n\n if (\n 
predictions_arff[\"attributes\"][predicted_idx][1]\n != predictions_arff[\"attributes\"][correct_idx][1]\n ):\n pred = predictions_arff[\"attributes\"][predicted_idx][1]\n corr = predictions_arff[\"attributes\"][correct_idx][1]\n raise ValueError(\n \"Predicted and Correct do not have equal values:\"\n \" %s Vs. %s\" % (str(pred), str(corr))\n )\n\n # TODO: these could be cached\n values_predict = {}\n values_correct = {}\n for line_idx, line in enumerate(predictions_arff[\"data\"]):\n rep = line[repeat_idx]\n fold = line[fold_idx]\n if has_samples:\n samp = line[sample_idx]\n else:\n samp = 0 # No learning curve sample, always 0\n\n if task.task_type_id in [\n TaskType.SUPERVISED_CLASSIFICATION,\n TaskType.LEARNING_CURVE,\n ]:\n prediction = predictions_arff[\"attributes\"][predicted_idx][1].index(\n line[predicted_idx]\n )\n correct = predictions_arff[\"attributes\"][predicted_idx][1].index(line[correct_idx])\n elif task.task_type_id == TaskType.SUPERVISED_REGRESSION:\n prediction = line[predicted_idx]\n correct = line[correct_idx]\n if rep not in values_predict:\n values_predict[rep] = OrderedDict()\n values_correct[rep] = OrderedDict()\n if fold not in values_predict[rep]:\n values_predict[rep][fold] = OrderedDict()\n values_correct[rep][fold] = OrderedDict()\n if samp not in values_predict[rep][fold]:\n values_predict[rep][fold][samp] = []\n values_correct[rep][fold][samp] = []\n\n values_predict[rep][fold][samp].append(prediction)\n values_correct[rep][fold][samp].append(correct)\n\n scores = []\n for rep in values_predict.keys():\n for fold in values_predict[rep].keys():\n last_sample = len(values_predict[rep][fold]) - 1\n y_pred = values_predict[rep][fold][last_sample]\n y_true = values_correct[rep][fold][last_sample]\n scores.append(sklearn_fn(y_true, y_pred, **kwargs))\n return np.array(scores)\n\n def _parse_publish_response(self, xml_response: Dict):\n \"\"\" Parse the id from the xml_response and assign it to self. \"\"\"\n self.run_id = int(xml_response[\"oml:upload_run\"][\"oml:run_id\"])\n\n def _get_file_elements(self) -> Dict:\n \"\"\" Get file_elements to upload to the server.\n\n Derived child classes should overwrite this method as necessary.\n The description field will be populated automatically if not provided.\n \"\"\"\n if self.parameter_settings is None and self.model is None:\n raise PyOpenMLError(\n \"OpenMLRun must contain a model or be initialized with parameter_settings.\"\n )\n if self.flow_id is None:\n if self.flow is None:\n raise PyOpenMLError(\n \"OpenMLRun object does not contain a flow id or reference to OpenMLFlow \"\n \"(these should have been added while executing the task). \"\n )\n else:\n # publish the linked Flow before publishing the run.\n self.flow.publish()\n self.flow_id = self.flow.flow_id\n\n if self.parameter_settings is None:\n if self.flow is None:\n self.flow = openml.flows.get_flow(self.flow_id)\n self.parameter_settings = self.flow.extension.obtain_parameter_values(\n self.flow, self.model,\n )\n\n file_elements = {\"description\": (\"description.xml\", self._to_xml())}\n\n if self.error_message is None:\n predictions = arff.dumps(self._generate_arff_dict())\n file_elements[\"predictions\"] = (\"predictions.arff\", predictions)\n\n if self.trace is not None:\n trace_arff = arff.dumps(self.trace.trace_to_arff())\n file_elements[\"trace\"] = (\"trace.arff\", trace_arff)\n return file_elements\n\n def _to_dict(self) -> \"OrderedDict[str, OrderedDict]\":\n \"\"\" Creates a dictionary representation of self. 
\"\"\"\n description = OrderedDict() # type: 'OrderedDict'\n description[\"oml:run\"] = OrderedDict()\n description[\"oml:run\"][\"@xmlns:oml\"] = \"http://openml.org/openml\"\n description[\"oml:run\"][\"oml:task_id\"] = self.task_id\n description[\"oml:run\"][\"oml:flow_id\"] = self.flow_id\n if self.error_message is not None:\n description[\"oml:run\"][\"oml:error_message\"] = self.error_message\n description[\"oml:run\"][\"oml:parameter_setting\"] = self.parameter_settings\n if self.tags is not None:\n description[\"oml:run\"][\"oml:tag\"] = self.tags # Tags describing the run\n if (self.fold_evaluations is not None and len(self.fold_evaluations) > 0) or (\n self.sample_evaluations is not None and len(self.sample_evaluations) > 0\n ):\n description[\"oml:run\"][\"oml:output_data\"] = OrderedDict()\n description[\"oml:run\"][\"oml:output_data\"][\"oml:evaluation\"] = list()\n if self.fold_evaluations is not None:\n for measure in self.fold_evaluations:\n for repeat in self.fold_evaluations[measure]:\n for fold, value in self.fold_evaluations[measure][repeat].items():\n current = OrderedDict(\n [\n (\"@repeat\", str(repeat)),\n (\"@fold\", str(fold)),\n (\"oml:name\", measure),\n (\"oml:value\", str(value)),\n ]\n )\n description[\"oml:run\"][\"oml:output_data\"][\"oml:evaluation\"].append(current)\n if self.sample_evaluations is not None:\n for measure in self.sample_evaluations:\n for repeat in self.sample_evaluations[measure]:\n for fold in self.sample_evaluations[measure][repeat]:\n for sample, value in self.sample_evaluations[measure][repeat][fold].items():\n current = OrderedDict(\n [\n (\"@repeat\", str(repeat)),\n (\"@fold\", str(fold)),\n (\"@sample\", str(sample)),\n (\"oml:name\", measure),\n (\"oml:value\", str(value)),\n ]\n )\n description[\"oml:run\"][\"oml:output_data\"][\"oml:evaluation\"].append(\n current\n )\n return description\n"
] | [
[
"pandas.DataFrame.from_dict"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
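A quick illustration of the REST-path construction that _list_tasks in the openml row above performs: the task type becomes a "/type/<id>" segment and each keyword filter becomes an "/<operator>/<value>" pair, with task_id lists joined by commas. This is a minimal standalone sketch; the two-member TaskType enum and the "study_14" tag are illustrative stand-ins, not part of the original file.

# Sketch of the api_call construction used by _list_tasks above.
from enum import Enum

class TaskType(Enum):  # subset of the task-type ids listed in the docstring
    SUPERVISED_CLASSIFICATION = 1
    SUPERVISED_REGRESSION = 2

def build_task_list_call(task_type=None, **filters):
    api_call = "task/list"
    if task_type is not None:
        api_call += "/type/%d" % task_type.value
    for operator, value in filters.items():
        if operator == "task_id":  # task_id filters take an iterable of ids
            value = ",".join(str(int(i)) for i in value)
        api_call += "/%s/%s" % (operator, value)
    return api_call

print(build_task_list_call(TaskType.SUPERVISED_CLASSIFICATION, tag="study_14", limit=100))
# -> task/list/type/1/tag/study_14/limit/100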
jackyoung96/PettingZoo | [
"7ebcd1ddf9124b6857048af930d677974ab201a6",
"7ebcd1ddf9124b6857048af930d677974ab201a6"
] | [
"pettingzoo/mpe/scenarios/simple_world_comm.py",
"pettingzoo/mpe/scenarios/simple_spread.py"
] | [
"import numpy as np\n\nfrom .._mpe_utils.core import Agent, Landmark, World\nfrom .._mpe_utils.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(\n self,\n num_good_agents=2,\n num_adversaries=4,\n num_landmarks=1,\n num_food=2,\n num_forests=2,\n ):\n world = World()\n # set any world properties first\n world.dim_c = 4\n # world.damping = 1\n num_good_agents = num_good_agents\n num_adversaries = num_adversaries\n num_agents = num_adversaries + num_good_agents\n num_landmarks = num_landmarks\n num_food = num_food\n num_forests = num_forests\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.adversary = True if i < num_adversaries else False\n base_index = i - 1 if i < num_adversaries else i - num_adversaries\n base_index = 0 if base_index < 0 else base_index\n base_name = \"adversary\" if agent.adversary else \"agent\"\n base_name = \"leadadversary\" if i == 0 else base_name\n agent.name = f\"{base_name}_{base_index}\"\n agent.collide = True\n agent.leader = True if i == 0 else False\n agent.silent = True if i > 0 else False\n agent.size = 0.075 if agent.adversary else 0.045\n agent.accel = 3.0 if agent.adversary else 4.0\n # agent.accel = 20.0 if agent.adversary else 25.0\n agent.max_speed = 1.0 if agent.adversary else 1.3\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = \"landmark %d\" % i\n landmark.collide = True\n landmark.movable = False\n landmark.size = 0.2\n landmark.boundary = False\n world.food = [Landmark() for i in range(num_food)]\n for i, lm in enumerate(world.food):\n lm.name = \"food %d\" % i\n lm.collide = False\n lm.movable = False\n lm.size = 0.03\n lm.boundary = False\n world.forests = [Landmark() for i in range(num_forests)]\n for i, lm in enumerate(world.forests):\n lm.name = \"forest %d\" % i\n lm.collide = False\n lm.movable = False\n lm.size = 0.3\n lm.boundary = False\n world.landmarks += world.food\n world.landmarks += world.forests\n # world.landmarks += self.set_boundaries(world)\n # world boundaries now penalized with negative reward\n return world\n\n def set_boundaries(self, world):\n boundary_list = []\n landmark_size = 1\n edge = 1 + landmark_size\n num_landmarks = int(edge * 2 / landmark_size)\n for x_pos in [-edge, edge]:\n for i in range(num_landmarks):\n landmark = Landmark()\n landmark.state.p_pos = np.array([x_pos, -1 + i * landmark_size])\n boundary_list.append(landmark)\n\n for y_pos in [-edge, edge]:\n for i in range(num_landmarks):\n landmark = Landmark()\n landmark.state.p_pos = np.array([-1 + i * landmark_size, y_pos])\n boundary_list.append(landmark)\n\n for i, l in enumerate(boundary_list):\n l.name = \"boundary %d\" % i\n l.collide = True\n l.movable = False\n l.boundary = True\n l.color = np.array([0.75, 0.75, 0.75])\n l.size = landmark_size\n l.state.p_vel = np.zeros(world.dim_p)\n\n return boundary_list\n\n def reset_world(self, world, np_random):\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = (\n np.array([0.45, 0.95, 0.45])\n if not agent.adversary\n else np.array([0.95, 0.45, 0.45])\n )\n agent.color -= (\n np.array([0.3, 0.3, 0.3]) if agent.leader else np.array([0, 0, 0])\n )\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.25, 0.25, 0.25])\n for i, landmark in enumerate(world.food):\n landmark.color = np.array([0.15, 0.15, 
0.65])\n for i, landmark in enumerate(world.forests):\n landmark.color = np.array([0.6, 0.9, 0.6])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np_random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np_random.uniform(-0.9, +0.9, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n for i, landmark in enumerate(world.food):\n landmark.state.p_pos = np_random.uniform(-0.9, +0.9, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n for i, landmark in enumerate(world.forests):\n landmark.state.p_pos = np_random.uniform(-0.9, +0.9, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n if agent.adversary:\n collisions = 0\n for a in self.good_agents(world):\n if self.is_collision(a, agent):\n collisions += 1\n return collisions\n else:\n return 0\n\n def is_collision(self, agent1, agent2):\n delta_pos = agent1.state.p_pos - agent2.state.p_pos\n dist = np.sqrt(np.sum(np.square(delta_pos)))\n dist_min = agent1.size + agent2.size\n return True if dist < dist_min else False\n\n # return all agents that are not adversaries\n def good_agents(self, world):\n return [agent for agent in world.agents if not agent.adversary]\n\n # return all adversarial agents\n def adversaries(self, world):\n return [agent for agent in world.agents if agent.adversary]\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n # boundary_reward = -10 if self.outside_boundary(agent) else 0\n main_reward = (\n self.adversary_reward(agent, world)\n if agent.adversary\n else self.agent_reward(agent, world)\n )\n return main_reward\n\n def outside_boundary(self, agent):\n if (\n agent.state.p_pos[0] > 1\n or agent.state.p_pos[0] < -1\n or agent.state.p_pos[1] > 1\n or agent.state.p_pos[1] < -1\n ):\n return True\n else:\n return False\n\n def agent_reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n rew = 0\n shape = False\n adversaries = self.adversaries(world)\n if shape:\n for adv in adversaries:\n rew += 0.1 * np.sqrt(\n np.sum(np.square(agent.state.p_pos - adv.state.p_pos))\n )\n if agent.collide:\n for a in adversaries:\n if self.is_collision(a, agent):\n rew -= 5\n\n def bound(x):\n if x < 0.9:\n return 0\n if x < 1.0:\n return (x - 0.9) * 10\n return min(np.exp(2 * x - 2), 10) # 1 + (x - 1) * (x - 1)\n\n for p in range(world.dim_p):\n x = abs(agent.state.p_pos[p])\n rew -= 2 * bound(x)\n\n for food in world.food:\n if self.is_collision(agent, food):\n rew += 2\n rew -= 0.05 * min(\n np.sqrt(np.sum(np.square(food.state.p_pos - agent.state.p_pos)))\n for food in world.food\n )\n\n return rew\n\n def adversary_reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark\n rew = 0\n shape = True\n agents = self.good_agents(world)\n adversaries = self.adversaries(world)\n if shape:\n rew -= 0.1 * min(\n np.sqrt(np.sum(np.square(a.state.p_pos - agent.state.p_pos)))\n for a in agents\n )\n if agent.collide:\n for ag in agents:\n for adv in adversaries:\n if self.is_collision(ag, adv):\n rew += 5\n return rew\n\n def observation2(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n if not entity.boundary:\n entity_pos.append(entity.state.p_pos - 
agent.state.p_pos)\n\n food_pos = []\n for entity in world.food:\n if not entity.boundary:\n food_pos.append(entity.state.p_pos - agent.state.p_pos)\n # communication of all other agents\n comm = []\n other_pos = []\n other_vel = []\n for other in world.agents:\n if other is agent:\n continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not other.adversary:\n other_vel.append(other.state.p_vel)\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + other_vel\n )\n\n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks:\n if not entity.boundary:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n\n in_forest = [np.array([-1]) for _ in range(len(world.forests))]\n inf = [False for _ in range(len(world.forests))]\n\n for i in range(len(world.forests)):\n if self.is_collision(agent, world.forests[i]):\n in_forest[i] = np.array([1])\n inf[i] = True\n\n food_pos = []\n for entity in world.food:\n if not entity.boundary:\n food_pos.append(entity.state.p_pos - agent.state.p_pos)\n # communication of all other agents\n comm = []\n other_pos = []\n other_vel = []\n for other in world.agents:\n if other is agent:\n continue\n comm.append(other.state.c)\n\n oth_f = [\n self.is_collision(other, world.forests[i])\n for i in range(len(world.forests))\n ]\n\n # without forest vis\n for i in range(len(world.forests)):\n if inf[i] and oth_f[i]:\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not other.adversary:\n other_vel.append(other.state.p_vel)\n break\n else:\n if ((not any(inf)) and (not any(oth_f))) or agent.leader:\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n if not other.adversary:\n other_vel.append(other.state.p_vel)\n else:\n other_pos.append([0, 0])\n if not other.adversary:\n other_vel.append([0, 0])\n\n # to tell the pred when the prey are in the forest\n prey_forest = []\n ga = self.good_agents(world)\n for a in ga:\n if any([self.is_collision(a, f) for f in world.forests]):\n prey_forest.append(np.array([1]))\n else:\n prey_forest.append(np.array([-1]))\n # to tell leader when pred are in forest\n prey_forest_lead = []\n for f in world.forests:\n if any([self.is_collision(a, f) for a in ga]):\n prey_forest_lead.append(np.array([1]))\n else:\n prey_forest_lead.append(np.array([-1]))\n\n comm = [world.agents[0].state.c]\n\n if agent.adversary and not agent.leader:\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + other_vel\n + in_forest\n + comm\n )\n if agent.leader:\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + other_vel\n + in_forest\n + comm\n )\n else:\n return np.concatenate(\n [agent.state.p_vel]\n + [agent.state.p_pos]\n + entity_pos\n + other_pos\n + in_forest\n + other_vel\n )\n",
"import numpy as np\n\nfrom .._mpe_utils.core import Agent, Landmark, World\nfrom .._mpe_utils.scenario import BaseScenario\n\n\nclass Scenario(BaseScenario):\n def make_world(self, N=3):\n world = World()\n # set any world properties first\n world.dim_c = 2\n num_agents = N\n num_landmarks = N\n world.collaborative = True\n # add agents\n world.agents = [Agent() for i in range(num_agents)]\n for i, agent in enumerate(world.agents):\n agent.name = f\"agent_{i}\"\n agent.collide = True\n agent.silent = True\n agent.size = 0.15\n # add landmarks\n world.landmarks = [Landmark() for i in range(num_landmarks)]\n for i, landmark in enumerate(world.landmarks):\n landmark.name = \"landmark %d\" % i\n landmark.collide = False\n landmark.movable = False\n return world\n\n def reset_world(self, world, np_random):\n # random properties for agents\n for i, agent in enumerate(world.agents):\n agent.color = np.array([0.35, 0.35, 0.85])\n # random properties for landmarks\n for i, landmark in enumerate(world.landmarks):\n landmark.color = np.array([0.25, 0.25, 0.25])\n # set random initial states\n for agent in world.agents:\n agent.state.p_pos = np_random.uniform(-1, +1, world.dim_p)\n agent.state.p_vel = np.zeros(world.dim_p)\n agent.state.c = np.zeros(world.dim_c)\n for i, landmark in enumerate(world.landmarks):\n landmark.state.p_pos = np_random.uniform(-1, +1, world.dim_p)\n landmark.state.p_vel = np.zeros(world.dim_p)\n\n def benchmark_data(self, agent, world):\n rew = 0\n collisions = 0\n occupied_landmarks = 0\n min_dists = 0\n for lm in world.landmarks:\n dists = [\n np.sqrt(np.sum(np.square(a.state.p_pos - lm.state.p_pos)))\n for a in world.agents\n ]\n min_dists += min(dists)\n rew -= min(dists)\n if min(dists) < 0.1:\n occupied_landmarks += 1\n if agent.collide:\n for a in world.agents:\n if self.is_collision(a, agent):\n rew -= 1\n collisions += 1\n return (rew, collisions, min_dists, occupied_landmarks)\n\n def is_collision(self, agent1, agent2):\n delta_pos = agent1.state.p_pos - agent2.state.p_pos\n dist = np.sqrt(np.sum(np.square(delta_pos)))\n dist_min = agent1.size + agent2.size\n return True if dist < dist_min else False\n\n def reward(self, agent, world):\n # Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions\n rew = 0\n if agent.collide:\n for a in world.agents:\n if self.is_collision(a, agent):\n rew -= 1\n return rew\n\n def global_reward(self, world):\n rew = 0\n for lm in world.landmarks:\n dists = [\n np.sqrt(np.sum(np.square(a.state.p_pos - lm.state.p_pos)))\n for a in world.agents\n ]\n rew -= min(dists)\n return rew\n\n def observation(self, agent, world):\n # get positions of all entities in this agent's reference frame\n entity_pos = []\n for entity in world.landmarks: # world.entities:\n entity_pos.append(entity.state.p_pos - agent.state.p_pos)\n # entity colors\n entity_color = []\n for entity in world.landmarks: # world.entities:\n entity_color.append(entity.color)\n # communication of all other agents\n comm = []\n other_pos = []\n for other in world.agents:\n if other is agent:\n continue\n comm.append(other.state.c)\n other_pos.append(other.state.p_pos - agent.state.p_pos)\n return np.concatenate(\n [agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + comm\n )\n"
] | [
[
"numpy.square",
"numpy.concatenate",
"numpy.exp",
"numpy.array",
"numpy.zeros"
],
[
"numpy.concatenate",
"numpy.square",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
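Both MPE scenarios in the row above assemble each agent's observation egocentrically: landmark and other-agent positions are shifted into the observing agent's frame before np.concatenate flattens everything into one vector. A minimal sketch of that layout for simple_spread (the concrete coordinates below are made-up inputs, not values from the original file):

import numpy as np

agent_pos = np.array([0.1, -0.2])          # observing agent
agent_vel = np.array([0.0, 0.5])
landmarks = [np.array([0.5, 0.5]), np.array([-0.3, 0.8])]
others = [np.array([0.9, 0.1])]            # positions of the other agents
comm = [np.zeros(2)]                       # world.dim_c = 2 in make_world

entity_pos = [p - agent_pos for p in landmarks]   # relative landmark positions
other_pos = [p - agent_pos for p in others]       # relative agent positions
obs = np.concatenate([agent_vel] + [agent_pos] + entity_pos + other_pos + comm)
# len(obs) == 2 + 2 + 2*len(landmarks) + 2*len(others) + 2*len(comm)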
nicohrubec/blackjack_simulator | [
"b934a61f29ceae1bd37238963dfc83888bcdb5bd",
"b934a61f29ceae1bd37238963dfc83888bcdb5bd"
] | [
"src/game_simulation/players.py",
"src/dashboard/app.py"
] | [
"from src import configs\nimport pandas as pd\nimport numpy as np\n\n\n# creates player instances from predefined options\ndef player_factory(player_type, capital):\n if player_type == 'basic':\n return BasicPlayer(init_capital=capital)\n elif player_type == 'strategic':\n return StrategicPlayer(init_capital=capital, file_name='thorp_strategy.xlsx')\n elif player_type == 'counter':\n return CountingPlayer(init_capital=capital, file_name='thorp_strategy.xlsx', bet_unit=5)\n else:\n raise ValueError('There is no such player.')\n\n\n# Meta class from which further player types are inherited. To create new player types from this class you\n# will have to implement both the betting strategy and the playing strategy for the player you want to create.\n# Examples for how to create players from this player class are given below.\nclass Player(object):\n def __init__(self, init_capital):\n self.capital = init_capital\n\n def bet(self):\n raise NotImplementedError # for a given game situation how much does the player want to bet ?\n\n def bet_amount(self, amount):\n self.capital -= amount\n\n def play(self, player_cards, dealer_cards):\n raise NotImplementedError # for a given game situation what move does the player pick ?\n\n def add_capital(self, amount):\n self.capital += amount\n\n def get_capital(self):\n return self.capital\n\n def is_counter(self):\n return False # reimplement this to True if the player deploys a card counting strategy\n\n\nclass BasicPlayer(Player):\n\n def bet(self):\n if self.capital > 5:\n self.capital -= 5\n return 5\n else:\n return 0\n\n def play(self, player_cards, dealer_cards):\n player_value = sum(player_cards)\n\n if player_cards[0] == player_cards[1]:\n return 'P'\n elif player_value == 11:\n return 'D'\n elif player_value < 17:\n return 'H'\n else:\n return 'S'\n\n\n# This player deploys a naive betting strategy but uses a given strategy card (example in strategies folder)\n# to guide his moves.\nclass StrategicPlayer(Player):\n\n def __init__(self, init_capital, file_name):\n super().__init__(init_capital)\n strategy_path = configs.strategies_folder / file_name\n self.strategy_card = pd.read_excel(strategy_path, index_col=0, header=1)\n self.strategy_card.columns = [str(col) for col in self.strategy_card.columns] # convert columns to string\n self.strategy_card.index = self.strategy_card.index.map(str) # convert index to string\n\n def bet(self, *args, **kwargs): # naive betting\n if self.capital > 5:\n self.capital -= 5\n return 5\n else:\n return 0\n\n def play(self, player_cards, dealer_cards):\n player_value = sum(player_cards)\n\n if player_value == 21:\n return 'S'\n\n if len(player_cards) == 2: # first move\n if player_cards[0] == player_cards[1]: # split possible\n player_selector = 'D' + str(player_cards[0]) # eg D8 for double 8s\n return self.strategy_card.loc[player_selector, str(dealer_cards)]\n elif 11 in player_cards: # soft hand\n if player_value <= 21:\n player_selector = 'A' + str(player_value - 11)\n else:\n player_selector = str(player_value - 10)\n return self.strategy_card.loc[player_selector, str(dealer_cards)]\n else:\n return self.strategy_card.loc[str(player_value), str(dealer_cards)]\n\n else: # further moves --> only hit or stand allowed\n if 11 in player_cards: # soft hand\n if player_value <= 21:\n player_selector = 'A' + str(player_value - 11)\n else:\n player_selector = str(player_value - 10)\n return self.strategy_card.loc[player_selector, str(dealer_cards)]\n else: # hard hand\n return self.strategy_card.loc[str(player_value), 
str(dealer_cards)] if player_value < 21 else 'S'\n\n\n# This player plays basic strategy like the strategic player but he spreads his bet sizes according\n# to the current count. Count method used here is the HILO system. (+1 for 2-6, 0 for 7-9, -1 for 10 valued cards+ace)\n# Bet size is then computed as true count (running_count / number of remaining decks) - 1 * bet unit\nclass CountingPlayer(StrategicPlayer):\n def __init__(self, init_capital, file_name, bet_unit):\n super().__init__(init_capital, file_name)\n self.bet_unit = bet_unit\n self.running_count = 0\n self.num_seen_cards = 0\n\n def bet(self, num_decks, *args, **kwargs):\n super(CountingPlayer, self).bet(num_decks, *args, **kwargs)\n return max((self.get_true_count(num_decks) - 1) * self.bet_unit, self.bet_unit)\n\n def update_count(self, *args):\n for cards in args:\n for card in cards:\n self.running_count += self.get_card_value(card)\n\n self.num_seen_cards += len(cards)\n\n def reset_count(self):\n self.running_count = 0\n self.num_seen_cards = 0\n\n @staticmethod\n def get_card_value(card):\n if card == 1 or card == 11 or card == 10:\n return -1\n elif card < 7:\n return 1\n else:\n return 0\n\n def get_true_count(self, num_decks):\n num_played_decks = np.round(self.num_seen_cards / 52)\n remaining_decks = num_decks - num_played_decks\n\n return self.running_count / remaining_decks\n\n def is_counter(self):\n return True\n",
"import dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\n\nimport pandas as pd\nimport random\n\nfrom src import configs\n\nexternal_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n\n\n# selection for individual run visualisation\ndef get_dropdown(title, id, options):\n return html.Div([\n html.H4(title),\n dcc.Dropdown(\n id=id,\n options=[{'label': i, 'value': i} for i in options],\n value=options[0]\n )\n ], style={'margin': '2%'})\n\n\ndef get_summary_graph(df, capital, stat, title):\n num_decks, deck_penetration, players, budgets = df.num_decks.unique(), df.deck_penetration.unique(), \\\n df.player.unique(), df.capital.unique()\n num_settings = len(num_decks) * len(deck_penetration) * len(players) * len(budgets)\n num_rounds = int(len(df) / num_settings)\n\n return html.Div([\n html.Div([\n html.H4(title)\n ], style={\"align\": \"bottom\"}),\n dcc.Graph(\n id=str(stat) + '_results' + str(capital),\n figure={\n 'data': [\n dict(\n x=[i for i in range(1, num_rounds + 1)],\n y=df[(df.num_decks == d) & (df.capital == capital) & (df.deck_penetration == p) &\n (df.player == i)][('step_' + str(stat))],\n mode='lines',\n name=str(i) + ' player, ' + str(p) + ' deck penetration, ' + str(d) + ' decks'\n ) for d in num_decks for p in deck_penetration for i in players\n ],\n 'layout': dict(\n xaxis={'title': 'Num rounds after game start'},\n yaxis={'title': 'Mean remaining capital of the player'},\n legend={'x': 1, 'y': 1}\n )\n }\n )\n ])\n\n\n# create line graphs comparing statistics of the individual simulation setting results\ndef get_summary_graphs(df, capital):\n return html.Div([\n get_summary_graph(df, capital, 'mean', 'Mean remaining capital at a given step for player '\n 'with init capital of ' + str(capital)),\n get_summary_graph(df, capital, 'std', 'Std of remaining capital at a given step for player '\n 'with init capital of ' + str(capital))\n ], style={'columnCount': 1})\n\n\n# show data.head()\ndef generate_table(df, rows=5):\n return html.Div([html.Table([\n html.Thead(\n html.Tr([html.Th(col) for col in df.columns])\n ),\n html.Tbody([\n html.Tr([\n html.Td(df.iloc[i][col]) for col in df.columns\n ]) for i in range(min(len(df), rows))\n ]),\n ])], style={'width': '100%', 'overflowX': 'scroll'})\n\n\n# line graph of a sample of the individual run results for a selected setting\ndef generate_raw_figure(df, sample=100):\n run_cols = [col for col in df.columns if col.startswith('run')]\n return {\n 'data': [\n dict(\n x=[i for i in range(1, 101)],\n y=df[run_name],\n mode='lines',\n name=run_name\n ) for run_name in random.sample(run_cols, min(sample, len(run_cols)))\n ],\n 'layout': dict(\n xaxis={'title': 'Num rounds after game start'},\n yaxis={'title': 'Remaining capital of the player'}\n )\n }\n\n\ndef get_app(data):\n dashboard = dash.Dash(__name__, external_stylesheets=external_stylesheets)\n\n dashboard.layout = html.Div([\n html.H1('Blackjack Simulation'),\n\n html.H2('1. Summary statistics of all Simulations'),\n html.Div([get_summary_graphs(data, c) for c in data.capital.unique()]),\n\n html.H2('2. 
Look at specific simulations'),\n html.H3('Simulation Settings'),\n get_dropdown('Player', 'player_dd', data.player.unique()),\n get_dropdown('Num Decks', 'num_decks_dd', data.num_decks.unique()),\n get_dropdown('Deck Penetration', 'penetration_dd', data.deck_penetration.unique()),\n get_dropdown('Player Capital', 'capital_dd', data.capital.unique()),\n html.Div(id='my-div'),\n dcc.Graph(id='raw-values-line-plot')\n ], style={'width': '80%', 'padding-left': '10%', 'padding-right': '10%'})\n\n # adapt shown table and individual run line graph based on user selection\n @dashboard.callback(\n [Output('my-div', 'children'), Output('raw-values-line-plot', 'figure')],\n [Input('player_dd', 'value'), Input('num_decks_dd', 'value'), Input('penetration_dd', 'value'),\n Input('capital_dd', 'value')]\n )\n def update_table(player_value, num_decks_value, penetration_value, capital_value):\n dff = data[data['player'] == player_value]\n dff = dff[dff['num_decks'] == num_decks_value]\n dff = dff[dff['deck_penetration'] == penetration_value]\n dff = dff[dff['capital'] == capital_value]\n\n return generate_table(dff), generate_raw_figure(dff)\n\n return dashboard\n\n\nif __name__ == '__main__':\n path = configs.project_path.parent / 'results' / 'results.csv'\n results = pd.read_csv(path)\n app = get_app(data=results)\n app.run_server(debug=True)\n"
] | [
[
"numpy.round",
"pandas.read_excel"
],
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
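The comment block on CountingPlayer above compresses the whole HILO betting rule into one sentence; a worked example makes the arithmetic concrete. All numbers below are hypothetical game state, and the bet is floored at one betting unit exactly as in CountingPlayer.bet:

import numpy as np

running_count = 12      # HILO: +1 for 2-6, 0 for 7-9, -1 for 10-valued cards and aces
num_seen_cards = 104    # two full decks already dealt
num_decks = 6
bet_unit = 5

remaining_decks = num_decks - np.round(num_seen_cards / 52)   # 6 - 2 = 4
true_count = running_count / remaining_decks                  # 12 / 4 = 3
bet = max((true_count - 1) * bet_unit, bet_unit)              # (3 - 1) * 5 = 10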
MikeFlanigan/DMU_project | [
"6ae5f658f25f2bd10e20a94f0ccc9a6dd669bac9"
] | [
"pf_test_3d.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Wedge\n\nimport mpl_toolkits.mplot3d as a3\nimport matplotlib.colors as colors\nimport pylab as pl\nimport scipy as sp\nimport math\n\n\nrigx = 0.5\nrigy = 0.5\nrigz = 0.0\ncent = (rigx, rigy, rigz)\n\nradius = 0.5\n\ncam_info = {\"FOV pan\": 0, # degrees\n \"FOV tilt\": 0, # degrees (off of horizon, positive up)\n \"FOV width\": 40, # degrees\n \"FOV height\": 30, # degrees\n \"Zoom\": 0.2, # percent max\n \"Focal min\": 0.2, # unitless for now\n \"Focal max\": 0.6, # unitless for now\n \"FOV min_width\": 5, # degrees FOV at max zoom\n \"FOV max_width\": 90, # degrees FOV at min zoom\n \"FOV min_height\": 4, # degrees FOV at max zoom\n \"FOV max_height\": 90*4/5, # degrees FOV at min zoom\n }\n\n# hard core testing\ncam_info[\"FOV min_width\"] = 120\ncam_info[\"FOV max_width\"] = 120\ncam_info[\"FOV min_height\"] = 120\ncam_info[\"FOV max_height\"] = 120\ncam_info[\"Focal max\"] = 0.8\ncam_info[\"Zoom\"] = 1.0\n\n\n\n# camera control in degrees CCW\nctrl = 10\n\nN_particles = 100\n\ndef cart2pol(x, y):\n \"\"\"Return polar coords from cartesian coords.\n Utility fxn.\n \"\"\"\n rho = np.sqrt(x**2 + y**2)\n phi = np.arctan2(y, x)\n return(rho, phi)\n\ndef pol2cart(rho, phi):\n \"\"\"Return cartesian coords from polar coords.\n Utility fxn.\n \"\"\"\n x = rho * np.cos(phi) \n y = rho * np.sin(phi)\n return(x, y)\n\ndef polar2cart(r, theta, phi):\n theta = np.deg2rad(theta)\n phi = np.deg2rad(90-phi)\n x = r * math.sin(phi) * math.cos(theta)\n y = r * math.sin(phi) * math.sin(theta)\n z = r * math.cos(phi)\n return [x, y, z]\n\ndef sphere2cart(r, theta, phi):\n theta = np.deg2rad(theta)\n phi = np.deg2rad(90-phi)\n \n x = r * math.sin(phi) * math.cos(theta)\n y = r * math.sin(phi) * math.sin(theta)\n z = r * math.cos(phi)\n return [x, y, z]\n\ndef get_FOV_width(cam):\n m = (cam[\"FOV max_width\"] - cam[\"FOV min_width\"])\n width = -m*cam[\"Zoom\"] + cam[\"FOV max_width\"]\n return width\n\ndef get_FOV_height(cam):\n m = (cam[\"FOV max_height\"] - cam[\"FOV min_height\"])\n height = -m*cam[\"Zoom\"] + cam[\"FOV max_height\"]\n return height\n\ndef get_focal_dist(cam):\n m = (cam[\"Focal max\"] - cam[\"Focal min\"])\n f_dist = m*cam[\"Zoom\"] + cam[\"Focal min\"]\n return f_dist\n\ndef get_FOV_ends(cent, FOV):\n \"\"\"Take center of FOV and FOV in degrees [0-360]\n and return low and high endpoints in degrees.\n \"\"\"\n l = (cent - FOV/2) % 360\n h = (cent + FOV/2) % 360\n return [l,h]\n\ndef get_verts(r,theta,phi,cam):\n verts = [] # list of 3x3 patches of vertices\n rt = r # approximation for now\n x1, y1, z1 = 0, 0, 0\n x2, y2, z2 = polar2cart(r, theta+cam[\"FOV width\"]/2, phi+cam[\"FOV height\"]/2)\n x3, y3, z3 = polar2cart(r, theta-cam[\"FOV width\"]/2, phi+cam[\"FOV height\"]/2)\n x4, y4, z4 = polar2cart(r, theta+cam[\"FOV width\"]/2, phi-cam[\"FOV height\"]/2)\n x5, y5, z5 = polar2cart(r, theta-cam[\"FOV width\"]/2, phi-cam[\"FOV height\"]/2)\n \n verts.append(np.asarray([[x1,y1,z1],[x2,y2,z2],[x3,y3,z3]])) # top\n verts.append(np.asarray([[x1,y1,z1],[x3,y3,z3],[x5,y5,z5]])) # right \n verts.append(np.asarray([[x1,y1,z1],[x5,y5,z5],[x4,y4,z4]])) # bottom\n verts.append(np.asarray([[x1,y1,z1],[x4,y4,z4],[x2,y2,z2]])) # left \n verts.append(np.asarray([[x2,y2,z2],[x3,y3,z3],[x5,y5,z5],[x4,y4,z4]])) # face\n\n # apply rig translation\n for v in verts:\n v[:,0] += cent[0] # x\n v[:,1] += cent[1] # y\n v[:,2] += cent[2] # z\n return verts\n\ndef in_FOV(p, cam):\n # check if the particle is within the 
focal distance of the camera\n if p[0] <= get_focal_dist(cam):\n # check if the particle is within the pan FOV of the camera\n if ((abs(p[1]-cam[\"FOV pan\"]) % 360) <= cam[\"FOV width\"]/2):\n # check if the particle is within the tilt FOV of the camera\n if ((abs(p[2]-cam[\"FOV tilt\"]) % 360) <= cam[\"FOV height\"]/2):\n return True\n # if not in the FOV then:\n return False\n\nclass visual():\n \"\"\"Class for visualizing this experiment.\"\"\"\n \n def __init__(self):\n # setup plot \n ax = a3.Axes3D(pl.figure())\n\n # set axis planes to be white for visibility\n ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\n \n self.viz = ax\n\n\n def update(self, cam, particles, target, incre = None):\n pl.cla()\n\n self.viz.set_xlim([-0.1,1.1])\n self.viz.set_ylim([-0.1,1.1])\n self.viz.set_zlim([0,1.5])\n\n self.viz.set_xlabel('X')\n self.viz.set_ylabel('Y')\n self.viz.set_zlabel('Z')\n \n # center point (rig)\n self.viz.plot([cent[0]],[cent[1]],[cent[2]],'ro')\n\n for k in range(particles.shape[1]):\n r = particles[0,k]\n th = particles[1,k]\n phi = particles[2,k]\n \n x , y, z = sphere2cart(r, th, phi)\n x += cent[0]\n y += cent[1]\n if in_FOV(particles[:,k], cam):\n self.viz.plot([x],[y],[z],'rx')\n else:\n self.viz.plot([x],[y],[z],'kx')\n \n # create and plot camera FOV patches\n vtxs = get_verts(get_focal_dist(cam), cam[\"FOV pan\"], cam[\"FOV tilt\"], cam)\n for v in vtxs:\n tri = a3.art3d.Poly3DCollection([v])\n tri.set_alpha(0.2)\n tri.set_facecolor('g')\n tri.set_edgecolor('k')\n self.viz.add_collection3d(tri)\n \n if incre != None:\n## plt.savefig('./videos/imgs_01/'+str(incre))\n pl.pause(0.05)\n else: \n pl.pause(0.05)\n\ndef low_variance_resample(b, w):\n num_particles = b.shape[1]\n \n bnew = np.zeros(b.shape)\n r = np.random.rand()\n c = w[0]\n i = 0\n for m in range(num_particles):\n U = r + (m - 1)/num_particles\n while U > c:\n i += 1\n i = i % num_particles\n c += w[i]\n noise = np.asarray([np.random.rand()*radius/50-(radius/50)/2,\n np.random.randint(-1,2),\n np.random.randint(-1,2)])\n bnew[:,m] = b[:,i] + noise\n bnew[0,m] = np.clip(bnew[0,m],0.01*radius,radius) # clipping the particle distances\n## print(m,i)\n return bnew\n\ndef domain_resample(b, percent):\n \"\"\"Reject a small % of particles and replace them from a known distribution.\"\"\"\n cut = int(percent*b.shape[1])\n if cut <= 0: cut = 1\n fresh_radis = np.random.rand(1,cut)*radius\n fresh_thetas = np.random.rand(1,cut)*360\n fresh_phis = np.random.rand(1,cut)*90\n fresh_partics = np.concatenate((fresh_radis,fresh_thetas),0)\n fresh_partics = np.concatenate((fresh_partics,fresh_phis),0)\n # hm, just resampling z% of all particles seems to work well.\n # noteably this cuts down heaviest on particle dense regions.\n # however since in this filtering case, particle dense regions\n # are actually an artifact, this turns out to be good and rational.\n b = np.concatenate((b[:,cut:],fresh_partics),axis = 1)\n return b\n\n\ndef update_belief(b, cam, observation):\n weights = [1/len(b[0])]*len(b[0])\n weights = np.asarray(weights)\n\n b = domain_resample(b,0.01)\n \n i = 0\n for k in range(b.shape[1]):\n # if contained generative model would update here\n # based on states like velocity\n\n # TODO: write actual camera observation likelihoods based on zoom percents\n # assign weight to every particle based on observation\n # TODO: add tilt component to this detection check\n # check pan, check radius, check tilt\n if 
in_FOV(b[:,k], cam):\n if observation:\n weights[i] = 10*weights[i]\n else:\n weights[i] = 0.005*weights[i]\n else:\n pass # no observation on this particle\n\n i += 1 # increment index\n \n # normalize the weights to a valid probability distribution\n weights = weights/weights.sum()\n \n # use the low variance resampling algorithm from the Probabilistic Robotics Book\n b_new = low_variance_resample(b, weights)\n\n # TODO: maybe add a flag condition, if variance ever does get super low, resample\n # uniformly?\n \n return b_new\n \n# initialize belief to a random distribution of particles\nradis = np.random.rand(1,N_particles)*radius\nthetas = np.random.rand(1,N_particles)*360\nphis = np.random.rand(1,N_particles)*90\nbelief = np.concatenate((radis,thetas),0) \nbelief = np.concatenate((belief,phis),0) # belief is now 3d state, r, theta, phi\n\nviz = visual()\nfor i in range(550):\n\n # camera control\n # rand control for testing\n## if i % 2 == 0:\n## cam_info[\"FOV pan\"] = np.random.randint(0,360)\n## cam_info[\"FOV tilt\"] = np.random.randint(0,90)\n## cam_info[\"Zoom\"] = np.random.rand()\n \n cam_info[\"FOV pan\"] += ctrl\n cam_info[\"FOV pan\"] = cam_info[\"FOV pan\"] % 360\n cam_info[\"FOV width\"] = get_FOV_width(cam_info)\n cam_info[\"FOV height\"] = get_FOV_height(cam_info)\n \n # update belief based on current state\n belief = update_belief(belief, cam_info, False)\n\n## cam_info[\"Zoom\"] = 0.2\n\n # display what's happening\n plt.figure(1)\n viz.update(cam_info, belief, 0, incre = i)\n \nplt.close()\n\n\n\n"
] | [
[
"numpy.sqrt",
"numpy.clip",
"numpy.asarray",
"numpy.cos",
"numpy.sin",
"numpy.concatenate",
"numpy.arctan2",
"numpy.deg2rad",
"numpy.random.randint",
"numpy.random.rand",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
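low_variance_resample in the particle-filter row above follows the low-variance (systematic) resampler from the Probabilistic Robotics book, plus per-particle jitter noise specific to that script. For reference, a sketch of the plain textbook form, which draws a single offset r in [0, 1/M) and then walks M evenly spaced quantiles through the cumulative weights (weights are assumed normalized):

import numpy as np

def systematic_resample(particles, weights):
    # particles: (state_dim, M) array; weights: length-M, summing to 1
    M = particles.shape[1]
    resampled = np.zeros_like(particles)
    r = np.random.rand() / M      # single random offset in [0, 1/M)
    c = weights[0]                # running cumulative weight
    i = 0
    for m in range(M):
        U = r + m / M             # m-th evenly spaced quantile
        while U > c:
            i += 1
            c += weights[i]
        resampled[:, m] = particles[:, i]
    return resampled

Because all M draws share one random offset, this estimator has lower variance than M independent multinomial draws over the same weights.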
Euro2xx/generative-compression | [
"489f4fb77620e9b6d8f3bef691d32d50f6bb09be"
] | [
"compress.py"
] | [
"#!/usr/bin/python3\nimport tensorflow as tf\nimport numpy as np\nimport pandas as pd\nimport time, os, sys\nimport argparse\n\n# User-defined\nfrom network import Network\nfrom utils import Utils\nfrom data import Data\nfrom model import Model\nfrom config import config_test, directories\n\ntf.logging.set_verbosity(tf.logging.ERROR)\n\ndef single_compress(config, args):\n start = time.time()\n\n paths = np.array([args.image_path])\n\n gan = Model(config, paths, name='single_compress', evaluate=True)\n saver = tf.train.Saver()\n\n feed_dict_init = {gan.path_placeholder: paths}\n\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)) as sess:\n # Initialize variables\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n handle = sess.run(gan.train_iterator.string_handle())\n\n if args.restore_last:\n ckpt = tf.train.get_checkpoint_state(directories.checkpoints)\n assert (ckpt.model_checkpoint_path), 'Missing checkpoint file!'\n\n saver.restore(sess, ckpt.model_checkpoint_path)\n print('Most recent {} restored.'.format(ckpt.model_checkpoint_path))\n else:\n if args.restore_path:\n new_saver = tf.train.import_meta_graph('{}.meta'.format(args.restore_path))\n new_saver.restore(sess, args.restore_path)\n print('Previous checkpoint {} restored.'.format(args.restore_path))\n\n sess.run(gan.train_iterator.initializer, feed_dict=feed_dict_init)\n eval_dict = {gan.training_phase: False, gan.handle: handle}\n\n if args.output_path is None:\n output = os.path.splitext(os.path.basename(args.image_path))\n save_path = os.path.join(directories.samples, '{}_compressed.pdf'.format(output[0]))\n else:\n save_path = args.output_path\n Utils.single_plot(0, 0, sess, gan, handle, save_path, config, single_compress=True)\n print('Reconstruction saved to', save_path)\n\n return\n\n\ndef main(**kwargs):\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-rl\", \"--restore-last\", help=\"restore last saved model\", action=\"store_true\")\n parser.add_argument(\"-r\", \"--restore-path\", help=\"path to model to be restored\", type=str)\n parser.add_argument(\"-i\", \"--image-path\", help=\"path to image to compress\", type=str)\n parser.add_argument(\"-sm\", \"--semantic-map-path\", help=\"path to corresponding semantic map\", type=str)\n parser.add_argument(\"-o\", \"--output-path\", help=\"path to output image\", type=str)\n #parser.add_argument(\"-ds\", \"--dataset\", default=\"cityscapes\", help=\"choice of training dataset. Currently only supports cityscapes/ADE20k\", choices=set((\"cityscapes\", \"ADE20k\")), type=str)\n args = parser.parse_args()\n\n # Launch training\n single_compress(config_test, args)\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.local_variables_initializer",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.logging.set_verbosity",
"tensorflow.train.Saver",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
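compress.py in the row above uses the TensorFlow 1.x checkpoint-restore idiom: tf.train.get_checkpoint_state reads the 'checkpoint' index file in a directory and tf.train.Saver.restore loads the newest snapshot. A self-contained sketch of that round trip, with './ckpt' as an illustrative directory rather than the repository's directories.checkpoints:

import os
import tensorflow as tf  # TensorFlow 1.x API, matching the "1.10" pin recorded above

x = tf.Variable(0.0, name='x')
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    os.makedirs('./ckpt', exist_ok=True)
    saver.save(sess, './ckpt/model')  # writes model.* plus a 'checkpoint' index file

with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state('./ckpt')  # reads the 'checkpoint' index file
    assert ckpt and ckpt.model_checkpoint_path, 'Missing checkpoint file!'
    saver.restore(sess, ckpt.model_checkpoint_path)
    print('Most recent {} restored.'.format(ckpt.model_checkpoint_path))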
slimgroup/Software.siahkoohi2020EAGEdlb | [
"5d20f6891ca8a17fa1695ed629c3a589ab8416bd"
] | [
"src/sample.py"
] | [
"import torch\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nimport os\nimport h5py\nfrom load_vel import overthrust_model\nfrom generator import generator\nfrom tqdm import tqdm\nfrom scipy.interpolate import interp1d\nimport matplotlib.ticker as ticker\nsfmt=ticker.ScalarFormatter(useMathText=True) \nsfmt.set_powerlimits((0, 0))\nimport matplotlib\n\nclass Sample(object):\n def __init__(self, args):\n\n if torch.cuda.is_available() and args.cuda:\n torch.backends.cudnn.enabled = True\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n self.device = torch.device('cuda')\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n print(' [*] GPU is available')\n else:\n self.device = torch.device('cpu')\n torch.set_default_tensor_type('torch.FloatTensor')\n self.build_model(args)\n \n def build_model(self, args):\n\n m0, m, self.dm, spacing, shape, origin = overthrust_model(args.vel_dir)\n self.extent = np.array([0., self.dm.shape[2]*spacing[0], \n self.dm.shape[3]*spacing[1], 0.])/1.0e3\n self.dm = self.dm.to(self.device) \n self.load(args, os.path.join(args.checkpoint_dir, args.experiment))\n self.burn_in_index = 52\n\n def load(self, args, checkpoint_dir):\n\n log_to_load = os.path.join(checkpoint_dir, 'training-logs.pt')\n assert os.path.isfile(log_to_load)\n\n if args.cuda == 0:\n training_logs = torch.load(log_to_load, map_location='cpu')\n else:\n training_logs = torch.load(log_to_load)\n print(' [*] Samples loaded')\n self.net_loss_log = training_logs['net_loss_log']\n self.model_loss_log = training_logs['model_loss_log']\n self.samples = training_logs['samples']\n assert len(self.samples) > self.burn_in_index\n \n def test(self, args):\n\n \n fig = plt.figure(\"profile\", dpi=200, figsize=(7, 2.5))\n plt.imshow(self.dm[0, 0, :, :].t().cpu().numpy(), vmin=-3.0/100.0, vmax=3.0/100.0, \n aspect=1, extent=self.extent, cmap=\"seismic\", alpha=0.6, interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(\"True model - \" + r\"$\\delta \\mathbf{m}$\");\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"dm.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n self.samples = np.array(self.samples)\n self.samples = np.transpose(self.samples.reshape((-1, self.dm.shape[2], self.dm.shape[3])), \n (0, 2, 1))\n for j in range(self.samples.shape[0]):\n self.samples[j, :, :] = self.model_topMute(self.samples[j, :, :])\n\n samples_mean = np.mean(self.samples[self.burn_in_index:, :, :], axis=0)\n samples_std = np.std(self.samples[self.burn_in_index:, :, :], axis=0)\n\n if not os.path.exists(os.path.join(args.sample_dir, args.experiment, \"Gzs\")):\n os.makedirs(os.path.join(args.sample_dir, args.experiment, \"Gzs\"))\n\n idxs = np.random.choice(self.samples[self.burn_in_index:, :, :].shape[0], 5, replace=False)\n for i in idxs:\n fig = plt.figure(\"G(z_0)\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.samples[self.burn_in_index + i], vmin=-3.0/100.0, vmax=3.0/100.0, aspect=1, \\\n extent=self.extent, cmap=\"seismic\", alpha=0.6, interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}$\" + r\"$_{{{}}})$\".format(i));\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Gzs\", \"Gz\" + str(i) + \".png\"), format=\"png\", 
bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n fig = plt.figure(\"G(z_0) - G(z_i)\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.samples[self.burn_in_index + i] - self.samples[self.burn_in_index], \\\n vmin=-2e-2, vmax=2e-2, aspect=1, extent=self.extent, cmap=\"twilight_shifted\", interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}$\" + r\"$_{{{}}}) - $\".format(i) + \n r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}_{{0}})$\"); \n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Gzs\", \"Gz_\" + str(i) + \"-Gz0.png\"), format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n fig = plt.figure(\"G(z_0) -mean\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.samples[self.burn_in_index + i] - samples_mean, \\\n vmin=-2e-2, vmax=2e-2, aspect=1, extent=self.extent, cmap=\"twilight_shifted\", interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}$\" + r\"$_{{{}}}) - $\".format(i) + \n r\"$\\delta \\widehat { \\mathbf{m}}$\"); \n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Gzs\", \"Gz_\" + str(i) + \"-mean.png\"), format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n fig = plt.figure(\"mean of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_mean, vmin=-3.0/100.0, vmax=3.0/100.0, aspect=1, extent=self.extent, cmap=\"seismic\", \n alpha=0.6, interpolation=\"kaiser\")\n plt.title(r\"$\\delta \\widehat { \\mathbf{m}} $\" + \" - mean of \" + \n r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}_j)$\" + \"'s\" + \n r\"$, \\ \\widehat{{\\mathbf{w}}}_j \\sim p_{post} ( \\mathbf{w} | \\left \\{ \\mathbf{d}_{i}, \\mathbf{q}_{i} \\right \\}_{i=1}^N, \\mathbf{z} )$\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Gz-mean.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n \n x_loc = [334, 64]\n y_loc = [65, 79]\n fig = plt.figure(\"std of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_std, vmin=0., vmax=9e-3, aspect=1, extent=self.extent, cmap=\"OrRd\", \n interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.plot(x_loc[0]*0.025, y_loc[0]*0.025, marker=\"o\", ms=10, alpha=0.9, c=\"#00b4ba\", \n markerfacecolor=\"None\", markeredgewidth=1.2)\n plt.plot(x_loc[1]*0.025, y_loc[1]*0.025, marker=\"o\", ms=10, alpha=0.9, c=\"#00b4ba\",\n markerfacecolor=\"None\", markeredgewidth=1.2)\n plt.plot(x_loc[0]*0.025, y_loc[0]*0.025, marker=\"o\", ms=10, alpha=0.2, c=\"None\",\n markerfacecolor=\"#00b4ba\", markeredgewidth=.01)\n plt.plot(x_loc[1]*0.025, y_loc[1]*0.025, marker=\"o\", ms=10, alpha=0.2, c=\"None\",\n markerfacecolor=\"#00b4ba\", markeredgewidth=.01)\n plt.title(\"Point-wise standard deviation of \" + r\"$\\mathbf{g}(\\mathbf{z},\\widehat{{\\mathbf{w}}}_j)$\" + \"'s\")\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Gz-std.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n norm_fac = 0.0098\n fig = plt.figure(\"profile\", dpi=200, figsize=(7, 2.5))\n 
plt.imshow(self.dm[0, 0, :, :].t().cpu().numpy(), vmin=-3.0/100.0, vmax=3.0/100.0, \n aspect=1, extent=self.extent, cmap=\"seismic\", alpha=0.3, interpolation=\"kaiser\")\n horiz_loz = [50, 150, 250, 350]\n for loc in horiz_loz:\n plt.plot(samples_std[:, loc]/norm_fac + loc*.025, \n np.linspace(0., 3.025, samples_std.shape[0]), \n color=\"#0a9c00\", lw=1.4, alpha=0.7);\n plt.plot(np.zeros(self.dm.shape[3]) + loc*.025, \n np.linspace(0., 3.025, samples_std.shape[0]), color=\"k\", lw=1.4, alpha=0.5);\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt);\n plt.title(\"Point-wise standard deviation vertical profiles\");\n plt.xlabel(\"Horizontal distance (km)\"); plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"overlaid-std.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n def sample_prior(self, args):\n\n samples_to_load = os.path.join('./checkpoint', 'prior_samples.pt')\n if os.path.isfile(samples_to_load):\n self.prior_samples = torch.load(samples_to_load)['prior_samples']\n print(' [*] Prior samples loaded')\n else:\n print(' [*] Computing samples from the prior')\n self.prior_samples = []\n self.z = torch.randn((1, 3, 512, 128), device=self.device, requires_grad=False)\n for j in tqdm(range(5000)):\n self.G = generator(\n self.dm.size(),\n num_input_channels=3, num_output_channels=1, \n num_channels_down = [16, 32, 256],\n num_channels_up = [16, 32, 256],\n num_channels_skip = [0, 0, 0],\n upsample_mode = 'bicubic',\n need1x1_up = True,\n filter_size_down=5, \n filter_size_up=3,\n filter_skip_size = 1,\n need_sigmoid=False, \n need_bias=True, \n pad='reflection', \n act_fun='LeakyReLU').to(self.device)\n self.prior_samples.append(self.G(self.z).detach().cpu().numpy())\n\n torch.save({'prior_samples': self.prior_samples}, os.path.join('./checkpoint',\n 'prior_samples.pt'))\n print(' [*] Prior samples saved')\n\n self.prior_samples = np.array(self.prior_samples)\n self.prior_samples = np.transpose(self.prior_samples.reshape((-1, self.dm.shape[2], self.dm.shape[3])), \n (0, 2, 1))\n\n samples_mean = np.mean(self.prior_samples, axis=0)\n samples_std = np.std(self.prior_samples, axis=0)\n\n if not os.path.exists(os.path.join(args.sample_dir, args.experiment, \"Prior\")):\n os.makedirs(os.path.join(args.sample_dir, args.experiment, \"Prior\"))\n\n idxs = np.random.choice(1000, 5, replace=False)\n for i in idxs:\n fig = plt.figure(\"G(z_0)\", dpi=100, figsize=(7, 2.5))\n plt.imshow(self.prior_samples[i], vmin=-20.0/100.0, vmax=20.0/100.0, aspect=1, \\\n extent=self.extent, cmap=\"seismic\", alpha=0.6, interpolation=\"kaiser\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.title(r\"$\\mathbf{g}(\\mathbf{z},\\mathbf{w}_0)$\" \n + r\"$, \\ \\mathbf{w}_0 \\sim p_{prior} ( \\mathbf{w} )$\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \n \"Prior\", \"Gz\" + str(i) + \".png\"), format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n fig = plt.figure(\"mean of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_mean, vmin=np.min(samples_mean), vmax=-np.min(samples_mean), \n aspect=1, extent=self.extent, cmap=\"seismic\", \n alpha=0.6, interpolation=\"kaiser\")\n plt.title(\"Mean of \" + r\"$\\mathbf{g}(\\mathbf{z},\\mathbf{w}_i)$\" + \"'s\" + \n r\"$, \\ \\mathbf{w}_i \\sim p_{prior} ( \\mathbf{w} )$\")\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance 
(km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Prior-mean.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n fig = plt.figure(\"std of G(z) over random z's\", dpi=100, figsize=(7, 2.5))\n plt.imshow(samples_std, vmin=np.min(samples_std), vmax=np.max(samples_std), \n aspect=1, extent=self.extent, cmap=\"OrRd\", \n interpolation=\"kaiser\")\n plt.title(\"Point-wise standard deviation of \" + r\"$\\mathbf{g}(\\mathbf{z},\\mathbf{w}_i)$\" + \"'s\" )\n plt.colorbar(fraction=0.0145, pad=0.01, format=sfmt)\n plt.xlabel(\"Horizontal distance (km)\")\n plt.ylabel(\"Depth (km)\")\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"Prior-std.png\"), \n format=\"png\", bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n assert len(self.samples) > 0\n self.samples = np.array(self.samples)\n self.samples = np.transpose(self.samples.reshape((-1, self.dm.shape[2], self.dm.shape[3])), \n (0, 2, 1))\n for j in range(self.samples.shape[0]):\n self.samples[j, :, :] = self.model_topMute(self.samples[j, :, :])\n self.samples = self.samples[self.burn_in_index:, :, :]\n x_loc = [334, 64]\n y_loc = [65, 79]\n for ix, iy in zip(x_loc, y_loc):\n hist_init = []\n hist_trained = []\n for i in range(self.prior_samples.shape[0]):\n hist_init.append(self.prior_samples[i, iy, ix])\n for i in range(self.samples.shape[0]):\n hist_trained.append(self.samples[i, iy, ix])\n fig = plt.figure(\"hist\", dpi=100, figsize=(7, 2))\n n, bins, _ = plt.hist(np.array(hist_init), bins=np.linspace(-0.10, 0.10, 100), \n density=False, label=\"prior\", color=\"#ff8800\", alpha=0.5, histtype='bar')\n plt.hist(np.array(hist_trained), 12, density=True, label=\"posterior\", \n color=\"#00b4ba\", alpha=0.8, histtype='bar')\n plt.title(\"Point-wise histogram at (\" + \"{0:.2f}\".format(ix*.025) + \n \" km, \" + \"{0:.2f}\".format(iy*.025) + \" km)\");\n # plt.vlines(self.dm[0, 0, ix, iy], 0, 200, lw=0.8, label=r\"$\\delta \\mathbf{m}$\")\n plt.xlabel(\"Perturbation\");\n plt.legend()\n plt.grid()\n plt.xlim([-0.10, 0.10])\n plt.ylim([0, 125])\n plt.savefig(os.path.join(args.sample_dir, args.experiment, \"histogram-at-\" + \n \"{}\".format(ix) + \"x\" + \"{}\".format(iy) + \".png\"), format=\"png\", \n bbox_inches=\"tight\", dpi=300)\n plt.close(fig)\n\n # plt.stem(bins[:-1],n/10)\n\n\n def model_topMute(self, image, mute_end=20, length=1):\n\n mute_start = mute_end - length\n damp = np.zeros([image.shape[0]])\n damp[0:mute_start-1] = 0.\n damp[mute_end:] = 1.\n taper_length = mute_end - mute_start + 1\n taper = (1. + np.sin((np.pi/2.0*np.array(range(0,taper_length-1)))/(taper_length - 1)))/2.\n damp[mute_start:mute_end] = taper\n for j in range(0, image.shape[1]):\n image[:,j] = image[:,j]*damp\n return image"
] | [
[
"torch.set_default_tensor_type",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.legend",
"numpy.linspace",
"torch.load",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.mean",
"torch.cuda.is_available",
"torch.device",
"torch.randn",
"numpy.std",
"matplotlib.pyplot.close",
"matplotlib.ticker.ScalarFormatter",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.random.choice",
"numpy.min",
"matplotlib.pyplot.ylim",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
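Sample.load in the row above branches on args.cuda to pass map_location='cpu' into torch.load. That argument remaps tensor storages at load time, so a checkpoint written on a GPU machine still opens on a CPU-only one. A tiny self-contained round trip of the same pattern; 'toy-logs.pt' is a hypothetical file name standing in for 'training-logs.pt':

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Write a small log dict, then read it back on whatever device is available.
torch.save({'samples': [torch.randn(2, 2)]}, 'toy-logs.pt')
logs = torch.load('toy-logs.pt', map_location=device)
print(logs['samples'][0].device)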
shizizhou/ROAR | [
"f143605d84f30f071e24e8224014c0358e260c42",
"f143605d84f30f071e24e8224014c0358e260c42"
] | [
"ROAR/agent_module/legacy_agents/point_cloud_map_recording_agent.py",
"ROAR/agent_module/legacy_agents/floodfill_based_lane_follower.py"
] | [
"from ROAR.agent_module.agent import Agent\r\nfrom ROAR.utilities_module.data_structures_models import SensorsData\r\nfrom ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl\r\nfrom ROAR.perception_module.legacy.ground_plane_point_cloud_detector import GroundPlanePointCloudDetector\r\nfrom ROAR.visualization_module.visualizer import Visualizer\r\nimport numpy as np\r\nimport cv2\r\nfrom pathlib import Path\r\nfrom ROAR.planning_module.mission_planner.waypoint_following_mission_planner import WaypointFollowingMissionPlanner\r\nfrom ROAR.planning_module.behavior_planner.behavior_planner import BehaviorPlanner\r\nfrom ROAR.planning_module.local_planner.simple_waypoint_following_local_planner import \\\r\n SimpleWaypointFollowingLocalPlanner\r\n\r\nfrom typing import List\r\nfrom ROAR.control_module.pid_controller import PIDParam\r\nfrom ROAR.control_module.pid_controller import VehiclePIDController\r\nfrom ROAR.utilities_module.data_structures_models import MapEntry\r\n\r\n\r\nclass PointCloudMapRecordingAgent(Agent):\r\n def __init__(self, **kwargs):\r\n super(PointCloudMapRecordingAgent, self).__init__(**kwargs)\r\n self.logger.debug(\"GPD2 Agent Initialized\")\r\n self.route_file_path = Path(self.agent_settings.waypoint_file_path)\r\n self.mission_planner = WaypointFollowingMissionPlanner(agent=self)\r\n # initiated right after mission plan\r\n self.controller = \\\r\n self.pid_controller = VehiclePIDController(agent=self,\r\n args_lateral=PIDParam.default_lateral_param(),\r\n args_longitudinal=PIDParam.default_longitudinal_param(),\r\n target_speed=20)\r\n self.behavior_planner = BehaviorPlanner(agent=self)\r\n self.local_planner = SimpleWaypointFollowingLocalPlanner(\r\n agent=self,\r\n controller=self.controller,\r\n mission_planner=self.mission_planner,\r\n behavior_planner=self.behavior_planner,\r\n closeness_threshold=1)\r\n self.ground_plane_point_cloud_detector = GroundPlanePointCloudDetector(agent=self, max_points_to_convert=20000,\r\n ground_tilt_threshhold=0.05)\r\n self.visualizer = Visualizer(agent=self)\r\n self.map_history: List[MapEntry] = []\r\n self.file_written = False\r\n\r\n def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:\r\n super(PointCloudMapRecordingAgent, self).run_step(sensors_data=sensors_data, vehicle=vehicle)\r\n control = self.local_planner.run_in_series()\r\n try:\r\n ground_points = self.ground_plane_point_cloud_detector.run_in_series()\r\n\r\n # print(np.shape(ground_points))\r\n color_image = self.front_rgb_camera.data.copy()\r\n ground_cords_in_2d: np.ndarray = self.visualizer.world_to_img_transform(xyz=ground_points)[:, :2]\r\n # this is a hack, without 5000 threshold, it sometimes have false detection\r\n # if np.shape(ground_cords_in_2d)[0] > 4000:\r\n # estimate left = (x_min, img_pos[1]) and right = (x_max, img_pos[1])\r\n img_positions = self.visualizer.world_to_img_transform(\r\n np.array([self.local_planner.way_points_queue[1].location.to_array()]))\r\n img_pos = img_positions[0]\r\n y_range = img_pos[1] - 5, img_pos[1] + 5\r\n indices = np.where(\r\n np.logical_and(ground_cords_in_2d[:, 1] >= y_range[0], ground_cords_in_2d[:, 1] <= y_range[1]))\r\n bar_cords = ground_cords_in_2d[indices]\r\n x_min, y_min = np.amin(bar_cords, axis=0)\r\n x_max, y_max = np.amax(bar_cords, axis=0)\r\n left_img_cord, right_img_cord = (x_min, img_pos[1]), (x_max, img_pos[1])\r\n pts = self.img_cords_to_world_cords(left_img_cord, right_img_cord)\r\n\r\n # save it\r\n 
self.map_history.append(MapEntry(point_a=pts[0].tolist(), point_b=pts[1].tolist()))\r\n\r\n # visualize\r\n color_image[ground_cords_in_2d[:, 1], ground_cords_in_2d[:, 0]] = [255, 255, 255]\r\n for y, x, _ in img_positions:\r\n color_image[x - 2: x + 2, y - 2:y + 2] = self.visualizer.GREEN\r\n image = cv2.line(color_image, left_img_cord, right_img_cord, (0, 255, 0), 5)\r\n cv2.imshow(\"color\", image)\r\n cv2.waitKey(1)\r\n except Exception as e:\r\n self.logger.error(e)\r\n\r\n # write it to file\r\n if self.local_planner.is_done() and self.file_written is False:\r\n self.logger.debug(\"WRITING TO FILE\")\r\n output_file_path: Path = Path(\r\n self.agent_settings.output_data_folder_path) / \"easy_map_waypoints_pointcloud_v3.json\"\r\n f = output_file_path.open('w')\r\n import json\r\n json.dump(fp=f, obj=[map_entry.dict() for map_entry in self.map_history], indent=2)\r\n f.close()\r\n self.file_written = True\r\n return control\r\n\r\n def img_cords_to_world_cords(self, left_img_cord, right_img_cord):\r\n \"\"\"\r\n Converts depth data from the Front Depth Camera to World coordinates.\r\n\r\n Args:\r\n left_img_cord ():\r\n right_img_cord ():\r\n\r\n Returns:\r\n points: World coordinates in map\r\n \"\"\"\r\n depth = self.front_depth_camera.data\r\n # depth_center = depth[img_pos_center[1]][img_pos_center[0]] * 1000\r\n depth_left = depth[left_img_cord[1]][left_img_cord[0]] * 1000\r\n depth_right = depth[right_img_cord[1]][right_img_cord[0]] * 1000\r\n\r\n # reconstruct p2d and transform it back to world space\r\n raw_p2d = np.array([\r\n [left_img_cord[0] * depth_left, left_img_cord[1] * depth_left, depth_left],\r\n # [right_img_cord[0] * depth_center, right_img_cord[1] * depth_center, depth_center],\r\n [right_img_cord[0] * depth_right, right_img_cord[1] * depth_right, depth_right]\r\n ])\r\n cords_y_minus_z_x = np.linalg.inv(self.front_depth_camera.intrinsics_matrix) @ raw_p2d.T\r\n cords_xyz_1 = np.vstack([\r\n cords_y_minus_z_x[2, :],\r\n cords_y_minus_z_x[0, :],\r\n -cords_y_minus_z_x[1, :],\r\n np.ones((1, np.shape(cords_y_minus_z_x)[1]))\r\n ])\r\n points: np.ndarray = self.vehicle.transform.get_matrix() @ self.front_depth_camera.transform.get_matrix() @ cords_xyz_1\r\n points = points.T[:, :3]\r\n return points\r\n\r\n @staticmethod\r\n def _pix2xyz(depth_img, i, j):\r\n return [\r\n depth_img[i, j] * j * 1000,\r\n depth_img[i, j] * i * 1000,\r\n depth_img[i, j] * 1000\r\n ]\r\n",
"from ROAR.agent_module.agent import Agent\r\nfrom ROAR.configurations.configuration import Configuration as AgentConfig\r\nfrom ROAR.utilities_module.data_structures_models import SensorsData\r\nfrom ROAR.utilities_module.vehicle_models import Vehicle, VehicleControl\r\nfrom ROAR.control_module.pid_controller import VehiclePIDController\r\nfrom ROAR.perception_module.legacy.flood_fill_lane_detector import FloodfillLaneDetector\r\nfrom ROAR.control_module.pid_controller import PIDParam\r\nimport cv2\r\nimport numpy as np\r\n\r\n\r\nclass FloodfillBasedLaneFollower(Agent):\r\n def __init__(self, vehicle: Vehicle, agent_settings: AgentConfig, **kwargs):\r\n super().__init__(vehicle, agent_settings, **kwargs)\r\n self.controller = VehiclePIDController(agent=self, args_lateral=PIDParam.default_lateral_param(),\r\n args_longitudinal=PIDParam.default_longitudinal_param())\r\n self.floodfill_lane_detector = FloodfillLaneDetector(agent=self)\r\n\r\n def run_step(self, sensors_data: SensorsData, vehicle: Vehicle) -> VehicleControl:\r\n super().run_step(sensors_data=sensors_data, vehicle=vehicle)\r\n try:\r\n img = self.floodfill_lane_detector.run_in_series()\r\n\r\n # left, front, right_steering dot img location\r\n left_dot_coord = (self.front_rgb_camera.image_size_x // 4, 350)\r\n center_dot_coord = (self.front_rgb_camera.image_size_x // 2, 350)\r\n right_dot_coord = (self.front_rgb_camera.image_size_x - (self.front_rgb_camera.image_size_x // 4), 350)\r\n blue = [255, 0, 0]\r\n\r\n left_ok = self._is_equal(img[left_dot_coord[::-1]], blue)\r\n center_ok = self._is_equal(img[center_dot_coord[::-1]], blue)\r\n right_ok = self._is_equal(img[right_dot_coord[::-1]], blue)\r\n\r\n result = cv2.circle(img=img, center=left_dot_coord, radius=10,\r\n color=(0, 0, 255), thickness=-1)\r\n result = cv2.circle(img=result, center=center_dot_coord, radius=10,\r\n color=(0, 0, 255), thickness=-1)\r\n result = cv2.circle(img=result, center=right_dot_coord, radius=10,\r\n color=(0, 0, 255), thickness=-1)\r\n cv2.imshow(\"rgb image\", result)\r\n cv2.waitKey(1)\r\n straight_throttle, turning_throttle, left_steering, right_steering = 0.18, 0.15, -0.4, 0.4\r\n throttle, steering = 0, 0\r\n if bool(left_ok) is False:\r\n # print(\"GO RIGHT!\")\r\n throttle = turning_throttle\r\n steering = left_steering\r\n elif bool(right_ok) is False:\r\n # print(\"GO LEFT!\")\r\n throttle = turning_throttle\r\n steering = right_steering\r\n elif center_ok:\r\n throttle, steering = straight_throttle, 0\r\n # if center_ok:\r\n # throttle, steering = 0.5, 0\r\n # elif left_ok:\r\n # throttle = 0.3\r\n # steering = -0.5\r\n # elif right_ok:\r\n # throttle = 0.3\r\n # steering = 0.5\r\n\r\n # self.logger.info(f\"Throttle = {throttle}, steering = {steering}\")\r\n return VehicleControl(throttle=throttle, steering=steering)\r\n except:\r\n return VehicleControl()\r\n\r\n @staticmethod\r\n def _is_equal(arr1, arr2):\r\n # print(sum(arr1 == arr2))\r\n return np.alltrue(arr1 == arr2)\r\n"
] | [
[
"numpy.amax",
"numpy.logical_and",
"numpy.amin",
"numpy.linalg.inv",
"numpy.shape",
"numpy.array"
],
[
"numpy.alltrue"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
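The point-cloud recording agent above estimates lane edges by masking projected ground points to a horizontal band with np.logical_and and then taking np.amin / np.amax over the surviving coordinates. A small standalone illustration of that band-extent step, using a made-up (N, 2) array in place of ground_cords_in_2d:

import numpy as np

# Hypothetical stand-in for ground_cords_in_2d: an (N, 2) array of (x, y) pixel coords.
pts = np.array([[10, 100], [40, 102], [25, 98], [60, 140]])

# Keep points whose y lies within +/-5 px of a target row, then take the
# horizontal extent of that band, i.e. the left/right edge estimate above.
y_lo, y_hi = 95, 105
band = pts[np.logical_and(pts[:, 1] >= y_lo, pts[:, 1] <= y_hi)]
x_min, y_min = np.amin(band, axis=0)
x_max, y_max = np.amax(band, axis=0)
print((x_min, 100), (x_max, 100))  # prints (10, 100) (40, 100): the band's outer points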
rhong3/Neutrophil | [
"97efb7cc01dc7b1bb06e29a824352d493bb4add5"
] | [
"scripts/Legacy/deprecated/cnnva.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 7 12:02:45 2017\n\n@authors: lwk, RH\n\n\"\"\"\n\nfrom datetime import datetime\nimport os\nimport sys\n\nimport numpy as np\nimport tensorflow as tf\nimport vgg\n\nslim = tf.contrib.slim\n\n\nclass INCEPTION():\n \"\"\"\n Use the InceptionV3 architecture\n\n \"\"\"\n\n DEFAULTS = {\n \"batch_size\": 128,\n \"dropout\": 0.8,\n \"learning_rate\": 1E-3\n }\n\n RESTORE_KEY = \"cnn_to_restore\"\n\n def __init__(self, input_dim, d_hyperparams={},\n save_graph_def=True, meta_graph=None,\n log_dir=\"./log\"):\n\n self.input_dim = input_dim\n self.__dict__.update(INCEPTION.DEFAULTS, **d_hyperparams)\n self.sesh = tf.Session()\n\n if meta_graph: # load saved graph\n model_name = os.path.basename(meta_graph)\n meta_graph = os.path.abspath(meta_graph)\n tf.train.import_meta_graph(log_dir + '/' + model_name +'.meta').restore(\n self.sesh, log_dir + '/' + model_name)\n handles = self.sesh.graph.get_collection(INCEPTION.RESTORE_KEY)\n\n\n else: # build graph from scratch\n self.datetime = datetime.now().strftime(r\"%y%m%d_%H%M\")\n handles = self._buildGraph()\n for handle in handles:\n tf.add_to_collection(INCEPTION.RESTORE_KEY, handle)\n self.sesh.run(tf.global_variables_initializer())\n\n # unpack handles for tensor ops to feed or fetch for lower layers\n (self.x_in, self.dropout_, self.is_train,\n self.y_in, self.logits, self.net, self.w, self.pred, self.pred_cost,\n self.global_step, self.train_op, self.merged_summary) = handles\n\n # print(self.batch_size,flush=True)\n # print(self.learning_rate,flush=True)\n\n if save_graph_def: # tensorboard\n try:\n os.mkdir(log_dir + '/training')\n os.mkdir(log_dir + '/validation')\n\n except(FileExistsError):\n pass\n\n self.train_logger = tf.summary.FileWriter(log_dir + '/training', self.sesh.graph)\n self.valid_logger = tf.summary.FileWriter(log_dir + '/validation', self.sesh.graph)\n\n @property\n def step(self):\n return self.global_step.eval(session=self.sesh)\n\n def _buildGraph(self):\n x_in = tf.placeholder(tf.float32, shape=[None, # enables variable batch size\n self.input_dim[0]], name=\"x\")\n x_in_reshape = tf.reshape(x_in, [-1, self.input_dim[1], self.input_dim[2], 3])\n\n dropout = tf.placeholder_with_default(1., shape=[], name=\"dropout\")\n\n y_in = tf.placeholder(dtype=tf.int8, name=\"y\")\n\n onehot_labels = tf.one_hot(indices=tf.cast(y_in, tf.int32), depth=2)\n\n is_train = tf.placeholder_with_default(True, shape=[], name=\"is_train\")\n\n logits, nett, ww = vgg.vgg_a(x_in_reshape,\n num_classes=2,\n is_training=is_train,\n dropout_keep_prob=dropout,\n spatial_squeeze=True,\n scope='vgga')\n\n pred = tf.nn.softmax(logits, name=\"prediction\")\n\n global_step = tf.Variable(0, trainable=False)\n\n pred_cost = tf.losses.softmax_cross_entropy(\n onehot_labels=onehot_labels, logits=logits)\n\n tf.summary.scalar(\"InceptionV3_cost\", pred_cost)\n\n train_op = tf.contrib.layers.optimize_loss(\n loss=pred_cost,\n learning_rate=self.learning_rate,\n global_step=global_step,\n optimizer=\"Adam\")\n\n merged_summary = tf.summary.merge_all()\n\n return (x_in, dropout, is_train,\n y_in, logits, nett, ww, pred, pred_cost,\n global_step, train_op, merged_summary)\n\n def inference(self, x, train_status=False):\n feed_dict = {self.x_in: x, self.is_train: train_status}\n fetches = [self.pred, self.net, self.w]\n return self.sesh.run(fetches, feed_dict=feed_dict)\n\n\n def get_global_step(self, X):\n x, y = X.train.next_batch(self.batch_size)\n\n feed_dict = {self.x_in: x, 
self.y_in: y,\n self.dropout_: self.dropout}\n\n fetches = [self.global_step]\n\n # Benchmark the learning\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n\n i = self.sesh.run(fetches, feed_dict)\n\n return i\n\n\n def train(self, X, max_iter=np.inf, max_epochs=np.inf, cross_validate=True,\n verbose=True, save=True, outdir=\"./out\"):\n\n if save:\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)\n\n try:\n err_train = 0\n now = datetime.now().isoformat()[11:]\n print(\"------- Training begin: {} -------\\n\".format(now), flush=True)\n\n while True:\n x, y = X.train.next_batch(self.batch_size)\n\n feed_dict = {self.x_in: x, self.y_in: y,\n self.dropout_: self.dropout}\n\n fetches = [self.merged_summary, self.logits, self.pred,\n self.pred_cost, self.global_step, self.train_op]\n\n # Benchmark the learning\n # run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n # run_metadata = tf.RunMetadata()\n\n summary, logits, pred, cost, i, _ = self.sesh.run(fetches, feed_dict\n # options=run_options,\n # run_metadata=run_metadata\n )\n\n self.train_logger.add_summary(summary, i)\n\n # get runtime statistics every 1000 runs\n # if i%1000 == 0:\n # self.logger.add_run_metadata(run_metadata, 'step%d' % i)\n err_train += cost\n\n if i % 1000 == 0 and verbose:\n # print(\"round {} --> avg cost: \".format(i), err_train / i, flush=True)\n print(\"round {} --> cost: \".format(i), cost, flush=True)\n\n elif i == max_iter and verbose:\n print(\"round {} --> cost: \".format(i), cost, flush=True)\n\n\n if i % 1000 == 0 and verbose: # and i >= 10000:\n\n if cross_validate:\n x, y = X.validation.next_batch(self.batch_size)\n feed_dict = {self.x_in: x, self.y_in: y}\n fetches = [self.pred_cost, self.merged_summary]\n valid_cost, valid_summary = self.sesh.run(fetches, feed_dict)\n\n self.valid_logger.add_summary(valid_summary, i)\n\n print(\"round {} --> CV cost: \".format(i), valid_cost, flush=True)\n print(valid_summary)\n\n elif i == max_iter and verbose: # and i >= 10000:\n\n if cross_validate:\n x, y = X.validation.next_batch(self.batch_size)\n feed_dict = {self.x_in: x, self.y_in: y}\n fetches = [self.pred_cost, self.merged_summary]\n valid_cost, valid_summary = self.sesh.run(fetches, feed_dict)\n\n self.valid_logger.add_summary(valid_summary, i)\n\n print(\"round {} --> CV cost: \".format(i), valid_cost, flush=True)\n print(valid_summary)\n\n\n \"\"\" \n if i%50000 == 0 and save:\n interfile=os.path.join(os.path.abspath(outdir), \"{}_cnn_{}\".format(\n self.datetime, \"_\".join(map(str, self.input_dim))))\n saver.save(self.sesh, interfile, global_step=self.step)\n \"\"\"\n\n if i >= max_iter or X.train.epochs_completed >= max_epochs:\n print(\"final avg cost (@ step {} = epoch {}): {}\".format(\n i, X.train.epochs_completed, err_train / i), flush=True)\n\n now = datetime.now().isoformat()[11:]\n print(\"------- Training end: {} -------\\n\".format(now), flush=True)\n\n if save:\n outfile = os.path.join(os.path.abspath(outdir), \"inception3_{}\".format(\"_\".join(['dropout', str(self.dropout)])))\n saver.save(self.sesh, outfile, global_step=None)\n try:\n self.train_logger.flush()\n self.train_logger.close()\n self.valid_logger.flush()\n self.valid_logger.close()\n\n except(AttributeError): # not logging\n continue\n break\n\n except(KeyboardInterrupt):\n print(\"final avg cost (@ step {} = epoch {}): {}\".format(\n i, X.train.epochs_completed, err_train / i), flush=True)\n\n now = datetime.now().isoformat()[11:]\n 
print(\"------- Training end: {} -------\\n\".format(now), flush=True)\n\n if save:\n outfile = os.path.join(os.path.abspath(outdir), \"inception3_{}\".format(\"_\".join(['dropout', str(self.dropout)])))\n saver.save(self.sesh, outfile, global_step=None)\n try:\n self.train_logger.flush()\n self.train_logger.close()\n self.valid_logger.flush()\n self.valid_logger.close()\n\n\n\n except(AttributeError): # not logging\n print('Not logging', flush=True)\n\n sys.exit(0)\n\n\n"
] | [
[
"tensorflow.nn.softmax",
"tensorflow.summary.FileWriter",
"tensorflow.Variable",
"tensorflow.placeholder_with_default",
"tensorflow.reshape",
"tensorflow.cast",
"tensorflow.placeholder",
"tensorflow.global_variables",
"tensorflow.train.import_meta_graph",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.global_variables_initializer",
"tensorflow.add_to_collection",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"tensorflow.summary.scalar",
"tensorflow.contrib.layers.optimize_loss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
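_buildGraph in cnnva.py above feeds int8 labels through tf.cast and tf.one_hot before tf.losses.softmax_cross_entropy. A minimal runnable excerpt of just that label-to-loss path, in the TensorFlow 1.x API the row's version list records:

import tensorflow as tf  # TensorFlow 1.x, per the "1.10" pin recorded above

y_in = tf.placeholder(dtype=tf.int8, name='y')
logits = tf.placeholder(tf.float32, shape=[None, 2])

# int8 labels are widened to int32 before one-hot encoding, as in _buildGraph.
onehot_labels = tf.one_hot(indices=tf.cast(y_in, tf.int32), depth=2)
pred_cost = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)

with tf.Session() as sess:
    print(sess.run(pred_cost, {y_in: [0, 1], logits: [[2.0, -1.0], [0.5, 1.5]]}))

tf.one_hot cannot consume int8 indices directly, which is why the cast appears in the original graph as well.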
glamod/glamod-nuim | [
"eed6f9d7d71b0c456ef39fdea6b58677e13ab50c",
"eed6f9d7d71b0c456ef39fdea6b58677e13ab50c",
"eed6f9d7d71b0c456ef39fdea6b58677e13ab50c",
"eed6f9d7d71b0c456ef39fdea6b58677e13ab50c",
"eed6f9d7d71b0c456ef39fdea6b58677e13ab50c"
] | [
"source_convert_IFF_code/321/DWD_oseas_data_extract_ws_pacific.py",
"source_convert_IFF_code/341/comb_slp_IFF_jasmin.py",
"source_convert_IFF_code/338/African _stations_late19thC_338_obseravations_pressure.py",
"source_convert_IFF_code/323/import_multiple_csv_time_convert_indian.py",
"source_convert_IFF_code/321/DWD_oseas_data_extract_station_level_pressure_pacific.py"
] | [
"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 19 09:02:55 2019\r\n\r\n@author: 67135099\r\n\"\"\"\r\n##reads all the files in the current working directory, make sure only input files are in folde\r\nimport os\r\n#import numpy as np \r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/\") \r\nfiles = [ f for f in os.listdir( os.curdir ) if os.path.isfile(f) ]\r\nfiles\r\nfirstFile = files[0]\r\nfirstFile\r\n\r\n#Checks all the files structure that all of the lines in the file are of length 403\r\n##################################################################################################\r\ncount = 0\r\nmaxLength = 0\r\n\r\nminLength = 100000\r\nwith open(firstFile, \"r\") as theFirstFile:\r\n for line in theFirstFile:\r\n count +=1\r\n if len(line) < minLength:\r\n minLength = len(line)\r\n if len(line) > maxLength:\r\n maxLength = len(line)\r\n print(\"Count =\", count)\r\n print(\"Max = \", maxLength)\r\n print(\"Min = \", minLength)\r\n #Now going to extract some data from the file and see what is there. \r\n##################################################################################################### \r\n\r\n########################################################################################################\r\n#Now we can combine all of the data from all of the files into a single data frame\r\n\r\nimport pandas as pd\r\ngiantListOfDictionaries = []\r\nfor currentFile in files:\r\n with open(currentFile, \"r\") as theFirstFile:\r\n for line in theFirstFile:\r\n field1 = line[21:29]\r\n field2 = line[54:58]\r\n field3 = line[58:60]\r\n field4 = line[60:62]\r\n field5 = line[65:67]\r\n field6 = line[68:70]\r\n###needd to calculate decimals offline and input here\r\n field7 = \"NULL\"\r\n field8 = \"NULL\"\r\n field9 = line[48:51]\r\n field10 = line[122:125]\r\n field11 = line[126:127]\r\n field12 = \"NULL\"\r\n field13 = \"NULL\"\r\n field14 = \"NULL\"\r\n field15 = \"321\"\r\n field16 = \"NULL\"\r\n field17 = line[0:20]\r\n field18 = \"NULL\"\r\n field19 = \"NULL\"\r\n \r\n\r\n \r\n currentDictionary = {\"File_name\": currentFile,'Station_ID': field1, \r\n \"Year\": field2,\"Month\": field3,\"Day\": field4,\"Hour\": field5,\"Minute\":field6,\r\n \"Latitude\":field7,\"Longitude\":field8,\"Elevation\":field9,\"Observed_value\":field10,\r\n \"Source_QC_flag\":field11,\"Original_observed_value\":field12,\r\n \"Original_observed_value_units\":field13,\r\n \"Gravity_corrected_by_source\":field14,\r\n \"Source_ID\":field15,\r\n \"Report_type_code\":field16,\"Station_name\":field17,\r\n \"Alias_station_name\":field18,\"Homogenization_corrected_by_source\":field19}\r\n giantListOfDictionaries.append(currentDictionary)\r\n \r\n#length of file\r\nlen(giantListOfDictionaries)\r\n\r\n\r\n#create a dataframe from dictionary\r\ngiantDataFrame = pd.DataFrame(giantListOfDictionaries)\r\n\r\n##giantDataFrame\r\n#Delete the unwanted first column File name\r\n#giantDataFrame=giantDataFrame.drop(\"File_name\",axis=1)\r\n\r\n\r\n########################################################################################### \r\n#replace missing data values in the dataframe with NUlL\r\ngiantDataFrame['Observed_value'] = pd.to_numeric(giantDataFrame['Observed_value'], errors='coerce')\r\ngiantDataFrame = giantDataFrame.drop([giantDataFrame.index[0]])\r\n#giantDataFrame['Observed_value'] = giantDataFrame['Observed_value']/10\r\ngiantDataFrame[\"Timestamp2\"] = giantDataFrame[\"Year\"].map(str) + \"-\" + giantDataFrame[\"Month\"].map(str)+ \"-\" + giantDataFrame[\"Day\"].map(str) 
\r\ngiantDataFrame[\"Timestamp\"] = giantDataFrame[\"Timestamp2\"].map(str)+ \" \" + giantDataFrame[\"Hour\"].map(str)+\":\"+giantDataFrame[\"Minute\"].map(str) \r\n\r\n########################################################################################\r\n##add in lat/long from a list matchinh station ids\r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/output\")\r\nstn_list = pd.read_csv(\"station_list.csv\")\r\ngiantDataFrame=giantDataFrame.merge(stn_list, on='Station_ID', how='left')\r\ngiantDataFrame=giantDataFrame.drop(\"Latitude_x\",axis=1)\r\ngiantDataFrame=giantDataFrame.drop(\"Longitude_x\",axis=1)\r\ngiantDataFrame[\"Longitude\"] = giantDataFrame[\"Longitude_y\"]\r\ngiantDataFrame[\"Latitude\"] = giantDataFrame[\"Latitude_y\"]\r\n##################################################################################\r\n###set order of cloumns headers in dataframe\r\ngiantDataFrame=giantDataFrame[[\"Source_ID\",'Station_ID',\"Station_name\",\"Alias_station_name\", \r\n \"Year\",\"Month\",\"Day\",\"Hour\",\"Minute\",\r\n \"Latitude\",\"Longitude\",\"Elevation\",\"Observed_value\",\r\n \"Source_QC_flag\",\"Original_observed_value\",\r\n \"Original_observed_value_units\",\r\n \"Gravity_corrected_by_source\",\r\n \"Homogenization_corrected_by_source\",\r\n \"Report_type_code\",\"Timestamp\"]]\r\n#####################################################################\r\n##'''strip leading and trailing space in each column'''\r\ngiantDataFrame['Source_ID'] = giantDataFrame['Source_ID'].str.strip() \r\ngiantDataFrame['Station_ID'] = giantDataFrame['Station_ID'].str.strip() \r\ngiantDataFrame['Station_name'] = giantDataFrame['Station_name'].str.strip()\r\ngiantDataFrame['Alias_station_name'] = giantDataFrame['Alias_station_name'].str.strip()\r\ngiantDataFrame['Year'] = giantDataFrame['Year'].str.strip()\r\ngiantDataFrame['Month'] = giantDataFrame['Month'].str.strip()\r\ngiantDataFrame['Day'] = giantDataFrame['Day'].str.strip()\r\ngiantDataFrame['Hour'] = giantDataFrame['Hour'].str.strip()\r\ngiantDataFrame['Minute'] = giantDataFrame['Minute'].str.strip()\r\n#giantDataFrame['Latitude'] = giantDataFrame['Latitude'].str.strip()\r\n#giantDataFrame['Longitude'] = giantDataFrame['Longitude'].str.strip()\r\ngiantDataFrame['Elevation'] = giantDataFrame['Elevation'].str.strip()\r\n#giantDataFrame['Observed_value'] = giantDataFrame['Observed_value'].str.strip()\r\ngiantDataFrame['Source_QC_flag'] = giantDataFrame['Source_QC_flag'].str.strip()\r\n#giantDataFrame['Original_observed_value'] = giantDataFrame['Original_observed'].str.strip()\r\ngiantDataFrame['Original_observed_value_units'] = giantDataFrame['Original_observed_value_units'].str.strip()\r\ngiantDataFrame['Gravity_corrected_by_source'] = giantDataFrame['Gravity_corrected_by_source'].str.strip()\r\ngiantDataFrame['Homogenization_corrected_by_source'] = giantDataFrame['Homogenization_corrected_by_source'].str.strip()\r\ngiantDataFrame['Report_type_code'] = giantDataFrame['Report_type_code'].str.strip()\r\n\r\n########################################################################################################\r\n\r\n\r\n\r\n############################################################\r\n#write one large pipe delimited file with all stations combined if same station named by station_id+ variable name\r\n \r\n#stationsAsBigList = giantDataFrame[\"Station_ID\"].tolist()\r\n#.to_csv('CHN01000_station_level_pressure_321.psv',sep='|',index=False)\r\n\r\n####################to csv by unique staion 
id\r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/output/Wind_speed\")\r\ncats = sorted(giantDataFrame['Station_ID'].unique())\r\nfor cat in cats:\r\n outfilename = cat + \"_wind_speed_321.psv\"\r\n print(outfilename)\r\n giantDataFrame[giantDataFrame[\"Station_ID\"] == cat].to_csv(outfilename,sep='|',index=False)\r\n############################################################\r\n##write out separate pipe delimited files by station id\r\n#stationsAsBigList = giantDataFrame[\"Station_ID\"].tolist()\r\n#stationList= list(set(stationsAsBigList))\r\n#for station in stationList:\r\n # print(type(station))\r\n # currentDataFrame = giantDataFrame[giantDataFrame['Station_ID'] == station]\r\n # currentDataFrame.to_csv(station + \"_pressure.csv\",sep=\",\",index=False)\r\n\r\n\r\n",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 17 14:22:41 2019\r\n\r\n@author: snoone\r\n\"\"\"\r\nimport os\r\nimport glob\r\nimport pandas as pd\r\nimport csv\r\n\r\n\r\n##import all csv files in current dir that need timezone changing to GMT based on hours offset \r\nos.chdir(\"/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/station_level_pressure/341_a\")\r\nextension = 'psv'\r\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\r\n#combine all files in the list\r\ndf1 = pd.concat([pd.read_csv(f,delimiter='|') for f in all_filenames])\r\n\r\nos.chdir(\"/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/station_level_pressure/341_b\")\r\nextension = 'psv'\r\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\r\n\r\ndf2 = pd.concat([pd.read_csv(f,delimiter='|') for f in all_filenames])\r\n\r\nos.chdir(\"/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/station_level_pressure/341_c\")\r\nextension = 'psv'\r\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\r\n\r\n#combine all files in the list\r\ndf3 = pd.concat([pd.read_csv(f,delimiter='|') for f in all_filenames])\r\n\r\nos.chdir(\"/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/station_level_pressure/341_d\")\r\nextension = 'psv'\r\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\r\n\r\n#combine all files in the list\r\ndf4 = pd.concat([pd.read_csv(f,delimiter='|') for f in all_filenames])\r\n\r\nos.chdir(\"/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/station_level_pressure/341_e\")\r\nextension = 'psv'\r\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\r\n\r\n#combine all files in the list\r\ndf5 = pd.concat([pd.read_csv(f,delimiter='|') for f in all_filenames])\r\n\r\nos.chdir(\"/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/station_level_pressure/341_f\")\r\nextension = 'psv'\r\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\r\n\r\n#combine all files in the list\r\ndf6 = pd.concat([pd.read_csv(f,delimiter='|') for f in all_filenames])\r\n\r\ndf_final=pd.concat([df1,df2,df3,df4,df5,df6], axis=0)\r\ndel df1,df2,df3,df4,df5,df6\r\n\r\ndf_final['Station_ID'] = df_final['Station_ID'].astype(str) \r\nos.chdir(r\"/gws/nopw/j04/c3s311a_lot2/data/level1/land/level1a_sub_daily_data/station_level_pressure/341\")\r\ncats = sorted(df_final['Station_ID'].unique())\r\nfor cat in cats:\r\n outfilename = cat + \"_station_level_pressure_341.psv\"\r\n print(outfilename)\r\n df_final[df_final[\"Station_ID\"] == cat].to_csv(outfilename,sep='|',index=False)",
"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 17 14:22:41 2019\r\n\r\n@author: snoone\r\n\"\"\"\r\nimport os\r\nimport pandas as pd\r\n\r\n\r\n\r\n\r\nos.chdir(\"D:/African _stations_late19thC_338\")\r\n\r\ndf=pd.read_excel(\"Copy of Kayes1907).xlsx\",skiprows = 1,sheet_name=\"data\") \r\n#df2=pd.read_excel(\"1953.xlsx\",sheet_name=\"j9\") \r\ndf[\"Source_ID\"]=\"338\"\r\ndf[\"Station_ID\"]=\"338-0001\"\r\ndf[\"Alias_station_name\"]=\"Null\"\r\ndf[\"Station_name\"]=\"Kays (Mali)\"\r\ndf[\"Latitude\"]= \"14.4367\"\r\ndf[\"Longitude\"]= \"-11.445\"\r\ndf[\"Elevation\"]= \"38\"\r\n#split the (GMT) timestampinto columns \r\ndf['Year'] = df['Unnamed: 0'].dt.year \r\ndf['Month'] = df['Unnamed: 0'].dt.month \r\ndf['Day'] = df['Unnamed: 0'].dt.day \r\ndf['Hour'] = df['Unnamed: 0'].dt.hour \r\ndf['Minute'] = df['Unnamed: 0'].dt.minute \r\ndf = df.drop(columns=\"Unnamed: 0\")\r\ndf[\"Hour\"]=\"7\"\r\ndf[\"Minute\"]= \"00\"\r\ndf[\"Alias_station_name\"]=df[\"Alias_station_name\"]\r\ndf[\"Source_QC_flag\"]=\"Null\"\r\ndf['Original_observed_value_units']=\"mmHg\"\r\ndf['Gravity_corrected_by_source']='Yes'\r\ndf['Homogenization_corrected_by_source']='Null'\r\ndf['Report_type_code']='Null'\r\ndf[\"Observed_value\"]=df[\"7h.1\"]\r\ndf['Original_observed_value']=df[\"7h.1\"]\r\ndf = df.fillna(0)\r\n##df.drop(df.tail(4).index,inplace=True)\r\ndf = df.astype({\"Year\": int})\r\ndf = df.astype({\"Month\": int})\r\ndf = df.astype({\"Day\": int})\r\ndf = df.astype({\"Hour\": int})\r\n#df = df.astype({\"Observed_value\": int})\r\ndf.dtypes\r\n\r\ndf['Observed_value']=df[\"Observed_value\"]* 1.33322\r\ndf[\"Timestamp2\"] = df[\"Year\"].map(str) + \"-\" + df[\"Month\"].map(str)+ \"-\" + df[\"Day\"].map(str) \r\ndf[\"Timestamp\"] = df[\"Timestamp2\"].map(str)+ \" \" + df[\"Hour\"].map(str)+\":\"+df[\"Minute\"].map(str) \r\n#join offset to sttaion id for tiemstamp conversion\r\ndf.drop(columns=[\"Timestamp2\"])\r\ndf= df[[\"Source_ID\",'Station_ID',\"Station_name\",\"Alias_station_name\", \r\n \"Year\",\"Month\",\"Day\",\"Hour\",\"Minute\",\r\n \"Latitude\",\"Longitude\",\"Elevation\",\"Observed_value\",\r\n \"Source_QC_flag\",\"Original_observed_value\",\r\n \"Original_observed_value_units\", \r\n \"Report_type_code\",\"Gravity_corrected_by_source\",\r\n \"Homogenization_corrected_by_source\", \"Timestamp\"]]\r\ndf['Original_observed_value']= round(df['Original_observed_value'],2)\r\ndf['Observed_value']= round(df['Observed_value'],1)\r\nos.chdir(\"D:/African _stations_late19thC_338/338\")\r\ndf.to_csv(\"338-0001_7.csv\", index=False, sep=\",\")\r\n\r\n#############################\r\n\r\n",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jul 17 14:22:41 2019\r\n\r\n@author: snoone\r\n\"\"\"\r\nimport os\r\nimport glob\r\nimport pandas as pd\r\nimport csv\r\nimport datetime\r\nimport numpy as np\r\n\r\n##import all csv files in current dir that need timezone changing to GMT based on hours offset \r\nos.chdir(\"D:/Indian_stationdata_187578_1884_188990/data_converted/pressure/IFF/2\")\r\nextension = 'psv'\r\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\r\n#combine all files in the list\r\ndf = pd.concat([pd.read_csv(f,delimiter='|') for f in all_filenames])\r\n#df['Observed_value']= round(df['Observed_value'],1)\r\n\r\n##convert timezones to UTC\r\n#df['Timestamp'] = df['Timestamp'].dt.tz_convert('GMT')\r\n#from datetime import datetime\r\n#date_str3 = df[\"Timestamp\"]\r\n#df[\"Timestamp\"] = datetime.strptime(date_str3, '%m/%d/%Y %H:%M')\r\ndf['Timestamp'] = pd.to_datetime(df['Timestamp'], format='%Y/%m/%d' \" \"\"%H:%M\")\r\ndf['Timestamp'] = df['Timestamp'].dt.tz_localize('Etc/GMT-6').dt.tz_convert('GMT')\r\n\r\n\r\n#split the (GMT) timestampinto columns \r\ndf['Year'] = df['Timestamp'].dt.year \r\ndf['Month'] = df['Timestamp'].dt.month \r\ndf['Day'] = df['Timestamp'].dt.day \r\ndf['Hour'] = df['Timestamp'].dt.hour \r\ndf['Minute'] = df['Timestamp'].dt.minute \r\n##delete unwanted columns \r\ndf = df.drop(columns=\"Timestamp\")\r\n##write output combined\r\n### convert elevation to metres\r\n\r\ndf['Minute']='30'\r\n \r\n\r\ndf = df[[\"Source_ID\",'Station_ID',\"Station_name\",\"Alias_station_name\", \r\n \"Year\",\"Month\",\"Day\",\"Hour\",\"Minute\",\r\n \"Latitude\",\"Longitude\",\"Elevation\",\"Observed_value\",\r\n \"Source_QC_flag\",\"Original_observed_value\",\r\n \"Original_observed_value_units\", \r\n \"Report_type_code\",\"Gravity_corrected_by_source\",\r\n \"Homogenization_corrected_by_source\"]]\r\ndf.to_csv(\"combined.csv\",index=False)\r\n\r\n##separate combined to separate files based on a column \r\n\r\nwith open('combined.csv') as fin: \r\n csvin = csv.DictReader(fin)\r\n #csvin.columns = [x.replace(' ', '_') for x in csvin.columns] \r\n # Category -> open file lookup\r\n outputs = {}\r\n for row in csvin:\r\n cat = row['Station_ID']\r\n # Open a new file and write the header\r\n if cat not in outputs:\r\n fout = open ('{}_station_level_pressure_323.psv'.format(cat), \"w\", newline = \"\")\r\n dw = csv.DictWriter(fout, fieldnames=csvin.fieldnames,delimiter='|')\r\n dw.writeheader()\r\n outputs[cat] = fout, dw\r\n # Always write the row\r\n outputs[cat][1].writerow(row)\r\n # Close all the files\r\n for fout, _ in outputs.values():\r\n fout.close()\r\n",
"\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Mar 19 09:02:55 2019\r\n\r\n@author: 67135099\r\n\"\"\"\r\n##reads all the files in the current working directory, make sure only input files are in folde\r\nimport os\r\n#import numpy as np \r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/\") \r\nfiles = [ f for f in os.listdir( os.curdir ) if os.path.isfile(f) ]\r\nfiles\r\nfirstFile = files[0]\r\nfirstFile\r\n\r\n#Checks all the files structure that all of the lines in the file are of length 403\r\n##################################################################################################\r\ncount = 0\r\nmaxLength = 0\r\n\r\nminLength = 100000\r\nwith open(firstFile, \"r\") as theFirstFile:\r\n for line in theFirstFile:\r\n count +=1\r\n if len(line) < minLength:\r\n minLength = len(line)\r\n if len(line) > maxLength:\r\n maxLength = len(line)\r\n print(\"Count =\", count)\r\n print(\"Max = \", maxLength)\r\n print(\"Min = \", minLength)\r\n #Now going to extract some data from the file and see what is there. \r\n##################################################################################################### \r\n\r\n########################################################################################################\r\n#Now we can combine all of the data from all of the files into a single data frame\r\n\r\nimport pandas as pd\r\ngiantListOfDictionaries = []\r\nfor currentFile in files:\r\n with open(currentFile, \"r\") as theFirstFile:\r\n for line in theFirstFile:\r\n field1 = line[21:29]\r\n field2 = line[54:58]\r\n field3 = line[58:60]\r\n field4 = line[60:62]\r\n field5 = line[65:67]\r\n field6 = line[68:70]\r\n###needd to calculate decimals offline and input here\r\n field7 = \"NULL\"\r\n field8 = \"NULL\"\r\n field9 = line[48:51]\r\n field10 = line[73:78]\r\n field11 = line[79:80]\r\n field12 = \"NULL\"\r\n field13 = \"NULL\"\r\n field14 = \"NULL\"\r\n field15 = \"321\"\r\n field16 = \"NULL\"\r\n field17 = line[0:20]\r\n field18 = \"NULL\"\r\n field19 = \"NULL\"\r\n \r\n\r\n \r\n currentDictionary = {\"File_name\": currentFile,'Station_ID': field1, \r\n \"Year\": field2,\"Month\": field3,\"Day\": field4,\"Hour\": field5,\"Minute\":field6,\r\n \"Latitude\":field7,\"Longitude\":field8,\"Elevation\":field9,\"Observed_value\":field10,\r\n \"Source_QC_flag\":field11,\"Original_observed_value\":field12,\r\n \"Original_observed_value_units\":field13,\r\n \"Gravity_corrected_by_source\":field14,\r\n \"Source_ID\":field15,\r\n \"Report_type_code\":field16,\"Station_name\":field17,\r\n \"Alias_station_name\":field18,\"Homogenization_corrected_by_source\":field19}\r\n giantListOfDictionaries.append(currentDictionary)\r\n \r\n#length of file\r\nlen(giantListOfDictionaries)\r\n\r\n\r\n#create a dataframe from dictionary\r\ngiantDataFrame = pd.DataFrame(giantListOfDictionaries)\r\n\r\n##giantDataFrame\r\n#Delete the unwanted first column File name\r\n#giantDataFrame=giantDataFrame.drop(\"File_name\",axis=1)\r\n\r\n\r\n########################################################################################### \r\n#replace missing data values in the dataframe with NUlL\r\ngiantDataFrame['Observed_value'] = pd.to_numeric(giantDataFrame['Observed_value'], errors='coerce')\r\ngiantDataFrame = giantDataFrame.drop([giantDataFrame.index[0]])\r\ngiantDataFrame['Observed_value'] = giantDataFrame['Observed_value']/10\r\ngiantDataFrame[\"Timestamp2\"] = giantDataFrame[\"Year\"].map(str) + \"-\" + giantDataFrame[\"Month\"].map(str)+ \"-\" + giantDataFrame[\"Day\"].map(str) 
\r\ngiantDataFrame[\"Timestamp\"] = giantDataFrame[\"Timestamp2\"].map(str)+ \" \" + giantDataFrame[\"Hour\"].map(str)+\":\"+giantDataFrame[\"Minute\"].map(str) \r\n\r\n########################################################################################\r\n##add in lat/long from a list matchinh station ids\r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/output\")\r\nstn_list = pd.read_csv(\"station_list.csv\")\r\ngiantDataFrame=giantDataFrame.merge(stn_list, on='Station_ID', how='left')\r\ngiantDataFrame=giantDataFrame.drop(\"Latitude_x\",axis=1)\r\ngiantDataFrame=giantDataFrame.drop(\"Longitude_x\",axis=1)\r\ngiantDataFrame[\"Longitude\"] = giantDataFrame[\"Longitude_y\"]\r\ngiantDataFrame[\"Latitude\"] = giantDataFrame[\"Latitude_y\"]\r\n##################################################################################\r\n###set order of cloumns headers in dataframe\r\ngiantDataFrame=giantDataFrame[[\"Source_ID\",'Station_ID',\"Station_name\",\"Alias_station_name\", \r\n \"Year\",\"Month\",\"Day\",\"Hour\",\"Minute\",\r\n \"Latitude\",\"Longitude\",\"Elevation\",\"Observed_value\",\r\n \"Source_QC_flag\",\"Original_observed_value\",\r\n \"Original_observed_value_units\",\r\n \"Gravity_corrected_by_source\",\r\n \"Homogenization_corrected_by_source\",\r\n \"Report_type_code\",\"Timestamp\"]]\r\n#####################################################################\r\n##'''strip leading and trailing space in each column'''\r\ngiantDataFrame['Source_ID'] = giantDataFrame['Source_ID'].str.strip() \r\ngiantDataFrame['Station_ID'] = giantDataFrame['Station_ID'].str.strip() \r\ngiantDataFrame['Station_name'] = giantDataFrame['Station_name'].str.strip()\r\ngiantDataFrame['Alias_station_name'] = giantDataFrame['Alias_station_name'].str.strip()\r\ngiantDataFrame['Year'] = giantDataFrame['Year'].str.strip()\r\ngiantDataFrame['Month'] = giantDataFrame['Month'].str.strip()\r\ngiantDataFrame['Day'] = giantDataFrame['Day'].str.strip()\r\ngiantDataFrame['Hour'] = giantDataFrame['Hour'].str.strip()\r\ngiantDataFrame['Minute'] = giantDataFrame['Minute'].str.strip()\r\n#giantDataFrame['Latitude'] = giantDataFrame['Latitude'].str.strip()\r\n#giantDataFrame['Longitude'] = giantDataFrame['Longitude'].str.strip()\r\ngiantDataFrame['Elevation'] = giantDataFrame['Elevation'].str.strip()\r\n#giantDataFrame['Observed_value'] = giantDataFrame['Observed_value'].str.strip()\r\ngiantDataFrame['Source_QC_flag'] = giantDataFrame['Source_QC_flag'].str.strip()\r\n#giantDataFrame['Original_observed_value'] = giantDataFrame['Original_observed'].str.strip()\r\ngiantDataFrame['Original_observed_value_units'] = giantDataFrame['Original_observed_value_units'].str.strip()\r\ngiantDataFrame['Gravity_corrected_by_source'] = giantDataFrame['Gravity_corrected_by_source'].str.strip()\r\ngiantDataFrame['Homogenization_corrected_by_source'] = giantDataFrame['Homogenization_corrected_by_source'].str.strip()\r\ngiantDataFrame['Report_type_code'] = giantDataFrame['Report_type_code'].str.strip()\r\n\r\n########################################################################################################\r\n\r\n\r\n\r\n############################################################\r\n#write one large pipe delimited file with all stations combined if same station named by station_id+ variable name\r\n \r\n#stationsAsBigList = giantDataFrame[\"Station_ID\"].tolist()\r\n#.to_csv('CHN01000_station_level_pressure_321.psv',sep='|',index=False)\r\n\r\n####################to csv by unique staion 
id\r\nos.chdir(r\"D:/DWD_overseas_subdy/data_pacific/output/pressure\")\r\ncats = sorted(giantDataFrame['Station_ID'].unique())\r\nfor cat in cats:\r\n outfilename = cat + \"_station_level_pressure_321.psv\"\r\n print(outfilename)\r\n giantDataFrame[giantDataFrame[\"Station_ID\"] == cat].to_csv(outfilename,sep='|',index=False)\r\n############################################################\r\n##write out separate pipe delimited files by station id\r\n#stationsAsBigList = giantDataFrame[\"Station_ID\"].tolist()\r\n#stationList= list(set(stationsAsBigList))\r\n#for station in stationList:\r\n # print(type(station))\r\n # currentDataFrame = giantDataFrame[giantDataFrame['Station_ID'] == station]\r\n # currentDataFrame.to_csv(station + \"_pressure.csv\",sep=\",\",index=False)\r\n\r\n\r\n"
] | [
[
"pandas.read_csv",
"pandas.to_numeric",
"pandas.DataFrame"
],
[
"pandas.concat",
"pandas.read_csv"
],
[
"pandas.read_excel"
],
[
"pandas.to_datetime",
"pandas.read_csv"
],
[
"pandas.read_csv",
"pandas.to_numeric",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
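The code field above boils down to a small, repeatable pandas pattern: merge station coordinates onto the observations, build a timestamp string from the component columns, then write one pipe-delimited file per station ID. Below is a minimal, self-contained sketch of that pattern; the station IDs, coordinates and pressure values are invented placeholders, and groupby is used as an equivalent, more idiomatic alternative to looping over sorted(unique()).

import pandas as pd

# Toy observations table; the column names mirror the script above,
# but the values are invented for illustration only.
obs = pd.DataFrame({
    "Station_ID": ["NC00001", "NC00001", "NC00002"],
    "Year": ["1950", "1950", "1951"],
    "Month": ["01", "01", "02"],
    "Day": ["05", "06", "07"],
    "Hour": ["09", "09", "12"],
    "Minute": ["00", "30", "00"],
    "Observed_value": [1012.3, 1011.8, 1009.4],
})
# Toy station list holding the coordinates to merge in.
stn_list = pd.DataFrame({
    "Station_ID": ["NC00001", "NC00002"],
    "Latitude": [-22.27, -17.75],
    "Longitude": [166.45, 168.30],
})

# Left join keeps every observation, even for stations missing from the list.
obs = obs.merge(stn_list, on="Station_ID", how="left")

# Build a single timestamp string from the date/time component columns.
obs["Timestamp"] = (obs["Year"] + "-" + obs["Month"] + "-" + obs["Day"]
                    + " " + obs["Hour"] + ":" + obs["Minute"])

# Write one pipe-delimited file per unique station ID.
for stn_id, group in obs.groupby("Station_ID"):
    group.to_csv(f"{stn_id}_station_level_pressure.psv", sep="|", index=False)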
TragerJoswig-Jones/dvoc_model | [
"fb5d369096e436b2e4a518c4f16c3493e36aadfc"
] | [
"dvoc_model/droop.py"
] | [
"from math import pi, sin, cos\nimport numpy as np\n\nfrom dvoc_model.reference_frames import SinCos, Abc, Dq0, AlphaBeta\nfrom dvoc_model.constants import *\nfrom dvoc_model.simulate import simulate, shift_controller_angle_half\nfrom dvoc_model.elements import Node, RefFrames\nfrom dvoc_model.calculations import calculate_power\n\n\nclass Droop(Node):\n def __init__(self,\n p_ref: float,\n q_ref: float,\n m_p: float = 2.6e-3,\n m_q: float = 5.0e-3,\n v_nom: float = 120.,\n hz_nom: float = 60,\n varphi: float = pi / 2,\n omega_c: float = 2 * pi * 30,\n ref: RefFrames = RefFrames.POLAR,\n dt: float = 1.0 / 10e3,\n start_eq: bool = True,\n ):\n\n # set droop controller parameters\n self.v_nom = v_nom\n self.omega_nom = 2 * pi * hz_nom\n self.omega_c = omega_c\n self.m_p = m_p\n self.m_q = m_q\n self.sin_phi = sin(varphi)\n self.cos_phi = cos(varphi)\n\n self.p_ref = p_ref\n self.q_ref = q_ref\n self.dt = dt\n self.line = None\n\n # set low-pass filter initial values\n p_filt = 0\n q_filt = 0\n self.p = 0\n self.q = 0\n\n # initialize state variables\n if ref is RefFrames.POLAR:\n super().__init__((self.v_nom, 0, p_filt, q_filt), ref)\n else:\n v = AlphaBeta.from_polar(self.v_nom, 0)\n super().__init__((v.alpha, v.beta, p_filt, q_filt), ref)\n\n if start_eq:\n shift_controller_angle_half(self, self.ref, self.omega_nom, self.dt)\n\n if ref is RefFrames.POLAR:\n self.state_names = [\"v\", \"theta\", \"p,filt\", \"q,filt\"]\n else:\n self.state_names = [\"v,alpha\", \"v,beta\", \"p,filt\", \"q,filt\"]\n\n def low_pass_dynamics(self, x, y_filt): # TODO: Search how to derive discretization of low pass\n return self.omega_c * (x - y_filt)\n\n def update_states(self):\n self.v = self.states[0]\n self.theta = self.states[1]\n self.p_filt = self.states[2]\n self.q_filt = self.states[3]\n\n def alpha_beta_dynamics(self, x=None, t=None, u=None):\n # Power Calculation\n v = AlphaBeta.from_polar(self.v, self.theta)\n i = self.line.i\n p_calc = 1.5 * (v.alpha * i.alpha + v.beta * i.beta)\n q_calc = 1.5 * (v.beta * i.alpha - v.alpha * i.beta)\n\n self.p = p_calc\n self.q = q_calc\n\n # Low-Pass Filter\n p_filt_dt = self.low_pass_dynamics(p_calc, self.p_filt)\n q_filt_dt = self.low_pass_dynamics(q_calc, self.q_filt)\n\n p_err = self.p_filt - self.p_ref\n q_err = self.q_filt - self.q_ref\n\n # Droop Control\n dadt = None\n dbdt = None\n\n dvdt = dbdt # TODO: Implement this?\n omega = dadt # Todo: Implement this?\n return np.array([dvdt, omega, p_filt_dt, q_filt_dt])\n\n def polar_dynamics(self, x=None, t=None, u=None):\n # Power Calculation\n if x is None:\n x = self.states[:, 0]\n v_ab = AlphaBeta.from_polar(x[0], x[1])\n v = x[0]\n theta = x[1]\n p_filt = x[2]\n q_filt = x[3]\n\n i = self.line.i_alpha_beta()\n p, q = calculate_power(v_ab, i)\n\n # Low-Pass Filter\n p_filt_dt = self.low_pass_dynamics(p, p_filt)\n q_filt_dt = self.low_pass_dynamics(q, q_filt)\n\n p_err = p_filt - self.p_ref\n q_err = q_filt - self.q_ref\n\n # Droop Control\n dvdt = (self.v_nom - self.m_q * q) - v\n omega = self.omega_nom - self.m_p * p_err\n return np.array([dvdt, omega, p_filt_dt, q_filt_dt])\n\n\nif __name__ == \"__main__\":\n import numpy as np\n from matplotlib import pyplot as plt\n\n v_nom = 120\n omega_c = 2*pi*30 # Changing this value changes how quickly P & Q filt reach the average cycle values\n q_ = 0\n\n # grid parameters\n grid = Dq0(SQRT_2*v_nom, 0, 0)\n grid_omega = TWO_PI * 60\n\n # simulation time parameters\n dt = 1 / 10e3\n t = 1000e-3\n ts = np.arange(0, t, dt)\n steps = len(ts)\n\n # create a step 
function for dispatch (3A to 6A)\n q_ref = q_ * np.ones(steps)\n\n p_ref = 0 * np.ones(steps)\n\n p_ref[len(ts)//8:] = 250 # Add a step in the Active Power reference\n p_ref[len(ts)//4:] = 500 # Add a step in the Active Power reference\n p_ref[len(ts)//2:] = 750 # Add a step in the Active Power reference\n\n controller = Droop(0., 0.)\n data = simulate(controller, p_ref, q_ref, dt, t, Rf=0.8)#, id0=1.93, iq0=-1.23)\n\n plt.show()\n"
] | [
[
"numpy.arange",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
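For reference, the polar droop law implemented in Droop.polar_dynamics above is omega = omega_nom - m_p * (p_filt - p_ref) together with dv/dt = (v_nom - m_q * q) - v, where the measured powers pass through first-order low-pass filters dy/dt = omega_c * (x - y). The following forward-Euler sketch integrates exactly those equations with the class's default parameters; the constant measured powers p_meas and q_meas are invented stand-ins for the line-current power calculation.

from math import pi

# Controller parameters (mirroring the Droop defaults above).
v_nom, omega_nom = 120.0, 2 * pi * 60
m_p, m_q = 2.6e-3, 5.0e-3
omega_c = 2 * pi * 30
dt = 1.0 / 10e3

p_ref, q_ref = 500.0, 0.0       # dispatch references
p_meas, q_meas = 480.0, 10.0    # assumed constant measurements (stand-ins)

v, theta, p_filt, q_filt = v_nom, 0.0, 0.0, 0.0
for _ in range(int(0.1 / dt)):  # integrate 100 ms with forward Euler
    # First-order low-pass filters on the measured powers.
    p_filt += dt * omega_c * (p_meas - p_filt)
    q_filt += dt * omega_c * (q_meas - q_filt)
    # Droop laws: frequency droops with the filtered active-power error,
    # voltage magnitude droops with the (unfiltered) reactive power.
    omega = omega_nom - m_p * (p_filt - p_ref)
    v += dt * ((v_nom - m_q * q_meas) - v)
    theta = (theta + dt * omega) % (2 * pi)

print(f"v = {v:.3f} V, f = {omega / (2 * pi):.4f} Hz")

At steady state the filters track the measurements, so the frequency settles at omega_nom - m_p * (p_meas - p_ref) and the voltage at v_nom - m_q * q_meas: the controller deviates from nominal in proportion to the unmet dispatch, which is what lets parallel droop units share load without communication.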
KerasKorea/YOLK_ObjectDetector | [
"78a260746c50508fcdf1d0c56a7d4f5373b1f5bf"
] | [
"keras_ssd/data_generator/object_detection_2d_data_generator.py"
] | [
"'''\nA data generator for 2D object detection.\n\nCopyright (C) 2018 Pierluigi Ferrari\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\nfrom __future__ import division\nimport numpy as np\nimport inspect\nfrom collections import defaultdict\nimport warnings\nimport sklearn.utils\nfrom copy import deepcopy\nfrom PIL import Image\nimport cv2\nimport csv\nimport os\nimport sys\nfrom tqdm import tqdm, trange\ntry:\n import h5py\nexcept ImportError:\n warnings.warn(\"'h5py' module is missing. The fast HDF5 dataset option will be unavailable.\")\ntry:\n import json\nexcept ImportError:\n warnings.warn(\"'json' module is missing. The JSON-parser will be unavailable.\")\ntry:\n from bs4 import BeautifulSoup\nexcept ImportError:\n warnings.warn(\"'BeautifulSoup' module is missing. The XML-parser will be unavailable.\")\ntry:\n import pickle\nexcept ImportError:\n warnings.warn(\"'pickle' module is missing. You won't be able to save parsed file lists and annotations as pickled files.\")\n\nfrom keras_ssd.ssd_encoder_decoder.ssd_input_encoder import SSDInputEncoder\nfrom keras_ssd.data_generator.object_detection_2d_image_boxes_validation_utils import BoxFilter\n\nclass DegenerateBatchError(Exception):\n '''\n An exception class to be raised if a generated batch ends up being degenerate,\n e.g. if a generated batch is empty.\n '''\n pass\n\nclass DatasetError(Exception):\n '''\n An exception class to be raised if a anything is wrong with the dataset,\n in particular if you try to generate batches when no dataset was loaded.\n '''\n pass\n\nclass DataGenerator:\n '''\n A generator to generate batches of samples and corresponding labels indefinitely.\n\n Can shuffle the dataset consistently after each complete pass.\n\n Currently provides three methods to parse annotation data: A general-purpose CSV parser,\n an XML parser for the Pascal VOC datasets, and a JSON parser for the MS COCO datasets.\n If the annotations of your dataset are in a format that is not supported by these parsers,\n you could just add another parser method and still use this generator.\n\n Can perform image transformations for data conversion and data augmentation,\n for details please refer to the documentation of the `generate()` method.\n '''\n\n def __init__(self,\n load_images_into_memory=False,\n hdf5_dataset_path=None,\n filenames=None,\n filenames_type='text',\n images_dir=None,\n labels=None,\n image_ids=None,\n eval_neutral=None,\n labels_output_format=('class_id', 'xmin', 'ymin', 'xmax', 'ymax'),\n verbose=True):\n '''\n Initializes the data generator. You can either load a dataset directly here in the constructor,\n e.g. 
an HDF5 dataset, or you can use one of the parser methods to read in a dataset.\n\n Arguments:\n load_images_into_memory (bool, optional): If `True`, the entire dataset will be loaded into memory.\n This enables noticeably faster data generation than loading batches of images into memory ad hoc.\n Be sure that you have enough memory before you activate this option.\n hdf5_dataset_path (str, optional): The full file path of an HDF5 file that contains a dataset in the\n format that the `create_hdf5_dataset()` method produces. If you load such an HDF5 dataset, you\n don't need to use any of the parser methods anymore, the HDF5 dataset already contains all relevant\n data.\n filenames (string or list, optional): `None` or either a Python list/tuple or a string representing\n a filepath. If a list/tuple is passed, it must contain the file names (full paths) of the\n images to be used. Note that the list/tuple must contain the paths to the images,\n not the images themselves. If a filepath string is passed, it must point either to\n (1) a pickled file containing a list/tuple as described above. In this case the `filenames_type`\n argument must be set to `pickle`.\n Or\n (2) a text file. Each line of the text file contains the file name (basename of the file only,\n not the full directory path) to one image and nothing else. In this case the `filenames_type`\n argument must be set to `text` and you must pass the path to the directory that contains the\n images in `images_dir`.\n filenames_type (string, optional): In case a string is passed for `filenames`, this indicates what\n type of file `filenames` is. It can be either 'pickle' for a pickled file or 'text' for a\n plain text file.\n images_dir (string, optional): In case a text file is passed for `filenames`, the full paths to\n the images will be composed from `images_dir` and the names in the text file, i.e. this\n should be the directory that contains the images to which the text file refers.\n If `filenames_type` is not 'text', then this argument is irrelevant.\n labels (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain Numpy arrays\n that represent the labels of the dataset.\n image_ids (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain the image\n IDs of the images in the dataset.\n eval_neutral (string or list, optional): `None` or either a Python list/tuple or a string representing\n the path to a pickled file containing a list/tuple. The list/tuple must contain for each image\n a list that indicates for each ground truth object in the image whether that object is supposed\n to be treated as neutral during an evaluation.\n labels_output_format (list, optional): A list of five strings representing the desired order of the five\n items class ID, xmin, ymin, xmax, ymax in the generated ground truth data (if any). 
The expected\n strings are 'xmin', 'ymin', 'xmax', 'ymax', 'class_id'.\n verbose (bool, optional): If `True`, prints out the progress for some constructor operations that may\n take a bit longer.\n '''\n self.labels_output_format = labels_output_format\n self.labels_format={'class_id': labels_output_format.index('class_id'),\n 'xmin': labels_output_format.index('xmin'),\n 'ymin': labels_output_format.index('ymin'),\n 'xmax': labels_output_format.index('xmax'),\n 'ymax': labels_output_format.index('ymax')} # This dictionary is for internal use.\n\n self.dataset_size = 0 # As long as we haven't loaded anything yet, the dataset size is zero.\n self.load_images_into_memory = load_images_into_memory\n self.images = None # The only way that this list will not stay `None` is if `load_images_into_memory == True`.\n\n # `self.filenames` is a list containing all file names of the image samples (full paths).\n # Note that it does not contain the actual image files themselves. This list is one of the outputs of the parser methods.\n # In case you are loading an HDF5 dataset, this list will be `None`.\n if not filenames is None:\n if isinstance(filenames, (list, tuple)):\n self.filenames = filenames\n elif isinstance(filenames, str):\n with open(filenames, 'rb') as f:\n if filenames_type == 'pickle':\n self.filenames = pickle.load(f)\n elif filenames_type == 'text':\n self.filenames = [os.path.join(images_dir, line.strip()) for line in f]\n else:\n raise ValueError(\"`filenames_type` can be either 'text' or 'pickle'.\")\n else:\n raise ValueError(\"`filenames` must be either a Python list/tuple or a string representing a filepath (to a pickled or text file). The value you passed is neither of the two.\")\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n else:\n self.filenames = None\n\n # In case ground truth is available, `self.labels` is a list containing for each image a list (or NumPy array)\n # of ground truth bounding boxes for that image.\n if not labels is None:\n if isinstance(labels, str):\n with open(labels, 'rb') as f:\n self.labels = pickle.load(f)\n elif isinstance(labels, (list, tuple)):\n self.labels = labels\n else:\n raise ValueError(\"`labels` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.labels = None\n\n if not image_ids is None:\n if isinstance(image_ids, str):\n with open(image_ids, 'rb') as f:\n self.image_ids = pickle.load(f)\n elif isinstance(image_ids, (list, tuple)):\n self.image_ids = image_ids\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. The value you passed is neither of the two.\")\n else:\n self.image_ids = None\n\n if not eval_neutral is None:\n if isinstance(eval_neutral, str):\n with open(eval_neutral, 'rb') as f:\n self.eval_neutral = pickle.load(f)\n elif isinstance(eval_neutral, (list, tuple)):\n self.eval_neutral = eval_neutral\n else:\n raise ValueError(\"`image_ids` must be either a Python list/tuple or a string representing the path to a pickled file containing a list/tuple. 
The value you passed is neither of the two.\")\n else:\n self.eval_neutral = None\n\n if not hdf5_dataset_path is None:\n self.hdf5_dataset_path = hdf5_dataset_path\n self.load_hdf5_dataset(verbose=verbose)\n else:\n self.hdf5_dataset = None\n\n def load_hdf5_dataset(self, verbose=True):\n '''\n Loads an HDF5 dataset that is in the format that the `create_hdf5_dataset()` method\n produces.\n\n Arguments:\n verbose (bool, optional): If `True`, prints out the progress while loading\n the dataset.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset = h5py.File(self.hdf5_dataset_path, 'r')\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset or images in memory, we will shuffle this index list.\n\n if self.load_images_into_memory:\n self.images = []\n if verbose: tr = trange(self.dataset_size, desc='Loading images into memory', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.images.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n\n if self.hdf5_dataset.attrs['has_labels']:\n self.labels = []\n labels = self.hdf5_dataset['labels']\n label_shapes = self.hdf5_dataset['label_shapes']\n if verbose: tr = trange(self.dataset_size, desc='Loading labels', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.labels.append(labels[i].reshape(label_shapes[i]))\n\n if self.hdf5_dataset.attrs['has_image_ids']:\n self.image_ids = []\n image_ids = self.hdf5_dataset['image_ids']\n if verbose: tr = trange(self.dataset_size, desc='Loading image IDs', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.image_ids.append(image_ids[i])\n\n if self.hdf5_dataset.attrs['has_eval_neutral']:\n self.eval_neutral = []\n eval_neutral = self.hdf5_dataset['eval_neutral']\n if verbose: tr = trange(self.dataset_size, desc='Loading evaluation-neutrality annotations', file=sys.stdout)\n else: tr = range(self.dataset_size)\n for i in tr:\n self.eval_neutral.append(eval_neutral[i])\n\n def parse_csv(self,\n images_dir,\n labels_filename,\n input_format,\n include_classes='all',\n random_sample=False,\n ret=False,\n verbose=True):\n '''\n Arguments:\n images_dir (str): The path to the directory that contains the images.\n labels_filename (str): The filepath to a CSV file that contains one ground truth bounding box per line\n and each line contains the following six items: image file name, class ID, xmin, xmax, ymin, ymax.\n The six items do not have to be in a specific order, but they must be the first six columns of\n each line. The order of these items in the CSV file must be specified in `input_format`.\n The class ID is an integer greater than zero. Class ID 0 is reserved for the background class.\n `xmin` and `xmax` are the left-most and right-most absolute horizontal coordinates of the box,\n `ymin` and `ymax` are the top-most and bottom-most absolute vertical coordinates of the box.\n The image name is expected to be just the name of the image file without the directory path\n at which the image is located.\n input_format (list): A list of six strings representing the order of the six items\n image file name, class ID, xmin, xmax, ymin, ymax in the input CSV file. The expected strings\n are 'image_name', 'xmin', 'xmax', 'ymin', 'ymax', 'class_id'.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. 
If 'all', all ground truth boxes will be included in the dataset.\n random_sample (float, optional): Either `False` or a float in `[0,1]`. If this is `False`, the\n full dataset will be used by the generator. If this is a float in `[0,1]`, a randomly sampled\n fraction of the dataset will be used, where `random_sample` is the fraction of the dataset\n to be used. For example, if `random_sample = 0.2`, 20 precent of the dataset will be randomly selected,\n the rest will be ommitted. The fraction refers to the number of images, not to the number\n of boxes, i.e. each image that will be added to the dataset will always be added with all\n of its boxes.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, and image IDs.\n '''\n\n # Set class members.\n self.images_dir = images_dir\n self.labels_filename = labels_filename\n self.input_format = input_format\n self.include_classes = include_classes\n\n # Before we begin, make sure that we have a labels_filename and an input_format\n if self.labels_filename is None or self.input_format is None:\n raise ValueError(\"`labels_filename` and/or `input_format` have not been set yet. You need to pass them as arguments.\")\n\n # Erase data that might have been parsed before\n self.filenames = []\n self.image_ids = []\n self.labels = []\n\n # First, just read in the CSV file lines and sort them.\n\n data = []\n\n with open(self.labels_filename, newline='') as csvfile:\n csvread = csv.reader(csvfile, delimiter=',')\n next(csvread) # Skip the header row.\n for row in csvread: # For every line (i.e for every bounding box) in the CSV file...\n if self.include_classes == 'all' or int(row[self.input_format.index('class_id')].strip()) in self.include_classes: # If the class_id is among the classes that are to be included in the dataset...\n box = [] # Store the box class and coordinates here\n box.append(row[self.input_format.index('image_name')].strip()) # Select the image name column in the input format and append its content to `box`\n for element in self.labels_output_format: # For each element in the output format (where the elements are the class ID and the four box coordinates)...\n box.append(int(row[self.input_format.index(element)].strip())) # ...select the respective column in the input format and append it to `box`.\n data.append(box)\n\n data = sorted(data) # The data needs to be sorted, otherwise the next step won't give the correct result\n\n # Now that we've made sure that the data is sorted by file names,\n # we can compile the actual samples and labels lists\n\n current_file = data[0][0] # The current image for which we're collecting the ground truth boxes\n current_image_id = data[0][0].split('.')[0] # The image ID will be the portion of the image name before the first dot.\n current_labels = [] # The list where we collect all ground truth boxes for a given image\n add_to_dataset = False\n for i, box in enumerate(data):\n\n if box[0] == current_file: # If this box (i.e. 
this line of the CSV file) belongs to the current image file\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else: # If this box belongs to a new image file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n current_labels = [] # Reset the labels list because this is a new file.\n current_file = box[0]\n current_image_id = box[0].split('.')[0]\n current_labels.append(box[1:])\n if i == len(data)-1: # If this is the last line of the CSV file\n if random_sample: # In case we're not using the full dataset, but a random sample of it.\n p = np.random.uniform(0,1)\n if p >= (1-random_sample):\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n else:\n self.labels.append(np.stack(current_labels, axis=0))\n self.filenames.append(os.path.join(self.images_dir, current_file))\n self.image_ids.append(current_image_id)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret: # In case we want to return these\n return self.images, self.filenames, self.labels, self.image_ids\n\n def parse_xml(self,\n images_dirs,\n image_set_filenames,\n annotations_dirs=[],\n classes=['background',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat',\n 'chair', 'cow', 'diningtable', 'dog',\n 'horse', 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor'],\n include_classes = 'all',\n exclude_truncated=False,\n exclude_difficult=False,\n ret=False,\n verbose=True):\n '''\n This is an XML parser for the Pascal VOC datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the data format and XML tags of the Pascal VOC datasets.\n\n Arguments:\n images_dirs (list): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. 
one directory that contains the images for Pascal VOC 2007, another that contains\n the images for Pascal VOC 2012, etc.).\n image_set_filenames (list): A list of strings, where each string is the path of the text file with the image\n set to be loaded. Must be one file per image directory given. These text files define what images in the\n respective image directories are to be part of the dataset and simply contains one image ID per line\n and nothing else.\n annotations_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains the annotations (XML files) that belong to the images in the respective image directories given.\n The directories must contain one XML file per image and the name of an XML file must be the image ID\n of the image it belongs to. The content of the XML files must be in the Pascal VOC format.\n classes (list, optional): A list containing the names of the object classes as found in the\n `name` XML tags. Must include the class `background` as the first list item. The order of this list\n defines the class IDs.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n exclude_truncated (bool, optional): If `True`, excludes boxes that are labeled as 'truncated'.\n exclude_difficult (bool, optional): If `True`, excludes boxes that are labeled as 'difficult'.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels, image IDs,\n and a list indicating which boxes are annotated with the label \"difficult\".\n '''\n # Set class members.\n self.images_dirs = images_dirs\n self.annotations_dirs = annotations_dirs\n self.image_set_filenames = image_set_filenames\n self.classes = classes\n self.include_classes = include_classes\n\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n self.eval_neutral = []\n if not annotations_dirs:\n self.labels = None\n self.eval_neutral = None\n annotations_dirs = [None] * len(images_dirs)\n\n for images_dir, image_set_filename, annotations_dir in zip(images_dirs, image_set_filenames, annotations_dirs):\n # Read the image set file that so that we know all the IDs of all the images to be included in the dataset.\n with open(image_set_filename) as f:\n image_ids = [line.strip() for line in f] # Note: These are strings, not integers.\n self.image_ids += image_ids\n\n if verbose: it = tqdm(image_ids, desc=\"Processing image set '{}'\".format(os.path.basename(image_set_filename)), file=sys.stdout)\n else: it = image_ids\n\n # Loop over all images in this dataset.\n for image_id in it:\n\n filename = '{}'.format(image_id) + '.jpg'\n self.filenames.append(os.path.join(images_dir, filename))\n\n if not annotations_dir is None:\n # Parse the XML file for this image.\n with open(os.path.join(annotations_dir, image_id + '.xml')) as f:\n soup = BeautifulSoup(f, 'xml')\n\n folder = soup.folder.text # In case we want to return the folder in addition to the image file name. 
Relevant for determining which dataset an image belongs to.\n #filename = soup.filename.text\n\n boxes = [] # We'll store all boxes for this image here.\n eval_neutr = [] # We'll store whether a box is annotated as \"difficult\" here.\n objects = soup.find_all('object') # Get a list of all objects in this image.\n\n # Parse the data for each object.\n for obj in objects:\n class_name = obj.find('name', recursive=False).text\n class_id = self.classes.index(class_name)\n # Check whether this class is supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not class_id in self.include_classes): continue\n pose = obj.find('pose', recursive=False).text\n truncated = int(obj.find('truncated', recursive=False).text)\n if exclude_truncated and (truncated == 1): continue\n difficult = int(obj.find('difficult', recursive=False).text)\n if exclude_difficult and (difficult == 1): continue\n # Get the bounding box coordinates.\n bndbox = obj.find('bndbox', recursive=False)\n xmin = int(bndbox.xmin.text)\n ymin = int(bndbox.ymin.text)\n xmax = int(bndbox.xmax.text)\n ymax = int(bndbox.ymax.text)\n item_dict = {'folder': folder,\n 'image_name': filename,\n 'image_id': image_id,\n 'class_name': class_name,\n 'class_id': class_id,\n 'pose': pose,\n 'truncated': truncated,\n 'difficult': difficult,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n if difficult: eval_neutr.append(True)\n else: eval_neutr.append(False)\n\n self.labels.append(boxes)\n self.eval_neutral.append(eval_neutr)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def parse_json(self,\n images_dirs,\n annotations_filenames,\n ground_truth_available=False,\n include_classes='all',\n ret=False,\n verbose=True):\n '''\n This is an JSON parser for the MS COCO datasets. It might be applicable to other datasets with minor changes to\n the code, but in its current form it expects the JSON format of the MS COCO datasets.\n\n Arguments:\n images_dirs (list, optional): A list of strings, where each string is the path of a directory that\n contains images that are to be part of the dataset. This allows you to aggregate multiple datasets\n into one (e.g. one directory that contains the images for MS COCO Train 2014, another one for MS COCO\n Val 2014, another one for MS COCO Train 2017 etc.).\n annotations_filenames (list): A list of strings, where each string is the path of the JSON file\n that contains the annotations for the images in the respective image directories given, i.e. one\n JSON file per image directory that contains the annotations for all images in that directory.\n The content of the JSON files must be in MS COCO object detection format. Note that these annotations\n files do not necessarily need to contain ground truth information. 
MS COCO also provides annotations\n files without ground truth information for the test datasets, called `image_info_[...].json`.\n ground_truth_available (bool, optional): Set `True` if the annotations files contain ground truth information.\n include_classes (list, optional): Either 'all' or a list of integers containing the class IDs that\n are to be included in the dataset. If 'all', all ground truth boxes will be included in the dataset.\n ret (bool, optional): Whether or not to return the outputs of the parser.\n verbose (bool, optional): If `True`, prints out the progress for operations that may take a bit longer.\n\n Returns:\n None by default, optionally lists for whichever are available of images, image filenames, labels and image IDs.\n '''\n self.images_dirs = images_dirs\n self.annotations_filenames = annotations_filenames\n self.include_classes = include_classes\n # Erase data that might have been parsed before.\n self.filenames = []\n self.image_ids = []\n self.labels = []\n if not ground_truth_available:\n self.labels = None\n\n # Build the dictionaries that map between class names and class IDs.\n with open(annotations_filenames[0], 'r') as f:\n annotations = json.load(f)\n # Unfortunately the 80 MS COCO class IDs are not all consecutive. They go\n # from 1 to 90 and some numbers are skipped. Since the IDs that we feed\n # into a neural network must be consecutive, we'll save both the original\n # (non-consecutive) IDs as well as transformed maps.\n # We'll save both the map between the original\n self.cats_to_names = {} # The map between class names (values) and their original IDs (keys)\n self.classes_to_names = [] # A list of the class names with their indices representing the transformed IDs\n self.classes_to_names.append('background') # Need to add the background class first so that the indexing is right.\n self.cats_to_classes = {} # A dictionary that maps between the original (keys) and the transformed IDs (values)\n self.classes_to_cats = {} # A dictionary that maps between the transformed (keys) and the original IDs (values)\n for i, cat in enumerate(annotations['categories']):\n self.cats_to_names[cat['id']] = cat['name']\n self.classes_to_names.append(cat['name'])\n self.cats_to_classes[cat['id']] = i + 1\n self.classes_to_cats[i + 1] = cat['id']\n\n # Iterate over all datasets.\n for images_dir, annotations_filename in zip(self.images_dirs, self.annotations_filenames):\n # Load the JSON file.\n with open(annotations_filename, 'r') as f:\n annotations = json.load(f)\n\n if ground_truth_available:\n # Create the annotations map, a dictionary whose keys are the image IDs\n # and whose values are the annotations for the respective image ID.\n image_ids_to_annotations = defaultdict(list)\n for annotation in annotations['annotations']:\n image_ids_to_annotations[annotation['image_id']].append(annotation)\n\n if verbose: it = tqdm(annotations['images'], desc=\"Processing '{}'\".format(os.path.basename(annotations_filename)), file=sys.stdout)\n else: it = annotations['images']\n\n # Loop over all images in this dataset.\n for img in it:\n\n self.filenames.append(os.path.join(images_dir, img['file_name']))\n self.image_ids.append(img['id'])\n\n if ground_truth_available:\n # Get all annotations for this image.\n annotations = image_ids_to_annotations[img['id']]\n boxes = []\n for annotation in annotations:\n cat_id = annotation['category_id']\n # Check if this class is supposed to be included in the dataset.\n if (not self.include_classes == 'all') and (not cat_id in 
self.include_classes): continue\n # Transform the original class ID to fit in the sequence of consecutive IDs.\n class_id = self.cats_to_classes[cat_id]\n xmin = annotation['bbox'][0]\n ymin = annotation['bbox'][1]\n width = annotation['bbox'][2]\n height = annotation['bbox'][3]\n # Compute `xmax` and `ymax`.\n xmax = xmin + width\n ymax = ymin + height\n item_dict = {'image_name': img['file_name'],\n 'image_id': img['id'],\n 'class_id': class_id,\n 'xmin': xmin,\n 'ymin': ymin,\n 'xmax': xmax,\n 'ymax': ymax}\n box = []\n for item in self.labels_output_format:\n box.append(item_dict[item])\n boxes.append(box)\n self.labels.append(boxes)\n\n self.dataset_size = len(self.filenames)\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32)\n if self.load_images_into_memory:\n self.images = []\n if verbose: it = tqdm(self.filenames, desc='Loading images into memory', file=sys.stdout)\n else: it = self.filenames\n for filename in it:\n with Image.open(filename) as image:\n self.images.append(np.array(image, dtype=np.uint8))\n\n if ret:\n return self.images, self.filenames, self.labels, self.image_ids\n\n def create_hdf5_dataset(self,\n file_path='dataset.h5',\n resize=False,\n variable_image_size=True,\n verbose=True):\n '''\n Converts the currently loaded dataset into a HDF5 file. This HDF5 file contains all\n images as uncompressed arrays in a contiguous block of memory, which allows for them\n to be loaded faster. Such an uncompressed dataset, however, may take up considerably\n more space on your hard drive than the sum of the source images in a compressed format\n such as JPG or PNG.\n\n It is recommended that you always convert the dataset into an HDF5 dataset if you\n have enugh hard drive space since loading from an HDF5 dataset accelerates the data\n generation noticeably.\n\n Note that you must load a dataset (e.g. via one of the parser methods) before creating\n an HDF5 dataset from it.\n\n The created HDF5 dataset will remain open upon its creation so that it can be used right\n away.\n\n Arguments:\n file_path (str, optional): The full file path under which to store the HDF5 dataset.\n You can load this output file via the `DataGenerator` constructor in the future.\n resize (tuple, optional): `False` or a 2-tuple `(height, width)` that represents the\n target size for the images. All images in the dataset will be resized to this\n target size before they will be written to the HDF5 file. 
If `False`, no resizing\n will be performed.\n variable_image_size (bool, optional): The only purpose of this argument is that its\n value will be stored in the HDF5 dataset in order to be able to quickly find out\n whether the images in the dataset all have the same size or not.\n verbose (bool, optional): Whether or not prit out the progress of the dataset creation.\n\n Returns:\n None.\n '''\n\n self.hdf5_dataset_path = file_path\n\n dataset_size = len(self.filenames)\n\n # Create the HDF5 file.\n hdf5_dataset = h5py.File(file_path, 'w')\n\n # Create a few attributes that tell us what this dataset contains.\n # The dataset will obviously always contain images, but maybe it will\n # also contain labels, image IDs, etc.\n hdf5_dataset.attrs.create(name='has_labels', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_image_ids', data=False, shape=None, dtype=np.bool_)\n hdf5_dataset.attrs.create(name='has_eval_neutral', data=False, shape=None, dtype=np.bool_)\n # It's useful to be able to quickly check whether the images in a dataset all\n # have the same size or not, so add a boolean attribute for that.\n if variable_image_size and not resize:\n hdf5_dataset.attrs.create(name='variable_image_size', data=True, shape=None, dtype=np.bool_)\n else:\n hdf5_dataset.attrs.create(name='variable_image_size', data=False, shape=None, dtype=np.bool_)\n\n # Create the dataset in which the images will be stored as flattened arrays.\n # This allows us, among other things, to store images of variable size.\n hdf5_images = hdf5_dataset.create_dataset(name='images',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.uint8))\n\n # Create the dataset that will hold the image heights, widths and channels that\n # we need in order to reconstruct the images from the flattened arrays later.\n hdf5_image_shapes = hdf5_dataset.create_dataset(name='image_shapes',\n shape=(dataset_size, 3),\n maxshape=(None, 3),\n dtype=np.int32)\n\n if not (self.labels is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_labels = hdf5_dataset.create_dataset(name='labels',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.int32))\n\n # Create the dataset that will hold the dimensions of the labels arrays for\n # each image so that we can restore the labels from the flattened arrays later.\n hdf5_label_shapes = hdf5_dataset.create_dataset(name='label_shapes',\n shape=(dataset_size, 2),\n maxshape=(None, 2),\n dtype=np.int32)\n\n hdf5_dataset.attrs.modify(name='has_labels', value=True)\n\n if not (self.image_ids is None):\n\n hdf5_image_ids = hdf5_dataset.create_dataset(name='image_ids',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=str))\n\n hdf5_dataset.attrs.modify(name='has_image_ids', value=True)\n\n if not (self.eval_neutral is None):\n\n # Create the dataset in which the labels will be stored as flattened arrays.\n hdf5_eval_neutral = hdf5_dataset.create_dataset(name='eval_neutral',\n shape=(dataset_size,),\n maxshape=(None),\n dtype=h5py.special_dtype(vlen=np.bool_))\n\n hdf5_dataset.attrs.modify(name='has_eval_neutral', value=True)\n\n if verbose:\n tr = trange(dataset_size, desc='Creating HDF5 dataset', file=sys.stdout)\n else:\n tr = range(dataset_size)\n\n # Iterate over all images in the dataset.\n for i in tr:\n\n # Store the image.\n with Image.open(self.filenames[i]) as image:\n\n image = np.asarray(image, dtype=np.uint8)\n\n # Make sure all images end up having 
three channels.\n if image.ndim == 2:\n image = np.stack([image] * 3, axis=-1)\n elif image.ndim == 3:\n if image.shape[2] == 1:\n image = np.concatenate([image] * 3, axis=-1)\n elif image.shape[2] == 4:\n image = image[:,:,:3]\n\n if resize:\n image = cv2.resize(image, dsize=(resize[1], resize[0]))\n\n # Flatten the image array and write it to the images dataset.\n hdf5_images[i] = image.reshape(-1)\n # Write the image's shape to the image shapes dataset.\n hdf5_image_shapes[i] = image.shape\n\n # Store the ground truth if we have any.\n if not (self.labels is None):\n\n labels = np.asarray(self.labels[i])\n # Flatten the labels array and write it to the labels dataset.\n hdf5_labels[i] = labels.reshape(-1)\n # Write the labels' shape to the label shapes dataset.\n hdf5_label_shapes[i] = labels.shape\n\n # Store the image ID if we have one.\n if not (self.image_ids is None):\n\n hdf5_image_ids[i] = self.image_ids[i]\n\n # Store the evaluation-neutrality annotations if we have any.\n if not (self.eval_neutral is None):\n\n hdf5_eval_neutral[i] = self.eval_neutral[i]\n\n hdf5_dataset.close()\n self.hdf5_dataset = h5py.File(file_path, 'r')\n self.hdf5_dataset_path = file_path\n self.dataset_size = len(self.hdf5_dataset['images'])\n self.dataset_indices = np.arange(self.dataset_size, dtype=np.int32) # Instead of shuffling the HDF5 dataset, we will shuffle this index list.\n\n def generate(self,\n batch_size=32,\n shuffle=True,\n transformations=[],\n label_encoder=None,\n returns={'processed_images', 'encoded_labels'},\n keep_images_without_gt=False,\n degenerate_box_handling='remove'):\n '''\n Generates batches of samples and (optionally) corresponding labels indefinitely.\n\n Can shuffle the samples consistently after each complete pass.\n\n Optionally takes a list of arbitrary image transformations to apply to the\n samples ad hoc.\n\n Arguments:\n batch_size (int, optional): The size of the batches to be generated.\n shuffle (bool, optional): Whether or not to shuffle the dataset before each pass.\n This option should always be `True` during training, but it can be useful to turn shuffling off\n for debugging or if you're using the generator for prediction.\n transformations (list, optional): A list of transformations that will be applied to the images and labels\n in the given order. Each transformation is a callable that takes as input an image (as a Numpy array)\n and optionally labels (also as a Numpy array) and returns an image and optionally labels in the same\n format.\n label_encoder (callable, optional): Only relevant if labels are given. A callable that takes as input the\n labels of a batch (as a list of Numpy arrays) and returns some structure that represents those labels.\n The general use case for this is to convert labels from their input format to a format that a given object\n detection model needs as its training targets.\n returns (set, optional): A set of strings that determines what outputs the generator yields. The generator's output\n is always a tuple that contains the outputs specified in this set and only those. If an output is not available,\n it will be `None`. The output tuple can contain the following outputs according to the specified keyword strings:\n * 'processed_images': An array containing the processed images. Will always be in the outputs, so it doesn't\n matter whether or not you include this keyword in the set.\n * 'encoded_labels': The encoded labels tensor. 
Will always be in the outputs if a label encoder is given,\n so it doesn't matter whether or not you include this keyword in the set if you pass a label encoder.\n * 'matched_anchors': Only available if `labels_encoder` is an `SSDInputEncoder` object. The same as 'encoded_labels',\n but containing anchor box coordinates for all matched anchor boxes instead of ground truth coordinates.\n This can be useful to visualize what anchor boxes are being matched to each ground truth box. Only available\n in training mode.\n * 'processed_labels': The processed, but not yet encoded labels. This is a list that contains for each\n batch image a Numpy array with all ground truth boxes for that image. Only available if ground truth is available.\n * 'filenames': A list containing the file names (full paths) of the images in the batch.\n * 'image_ids': A list containing the integer IDs of the images in the batch. Only available if there\n are image IDs available.\n * 'evaluation-neutral': A nested list of lists of booleans. Each list contains `True` or `False` for every ground truth\n bounding box of the respective image depending on whether that bounding box is supposed to be evaluation-neutral (`True`)\n or not (`False`). May return `None` if there exists no such concept for a given dataset. An example for\n evaluation-neutrality are the ground truth boxes annotated as \"difficult\" in the Pascal VOC datasets, which are\n usually treated to be neutral in a model evaluation.\n * 'inverse_transform': A nested list that contains a list of \"inverter\" functions for each item in the batch.\n These inverter functions take (predicted) labels for an image as input and apply the inverse of the transformations\n that were applied to the original image to them. This makes it possible to let the model make predictions on a\n transformed image and then convert these predictions back to the original image. This is mostly relevant for\n evaluation: If you want to evaluate your model on a dataset with varying image sizes, then you are forced to\n transform the images somehow (e.g. by resizing or cropping) to make them all the same size. Your model will then\n predict boxes for those transformed images, but for the evaluation you will need predictions with respect to the\n original images, not with respect to the transformed images. This means you will have to transform the predicted\n box coordinates back to the original image sizes. Note that for each image, the inverter functions for that\n image need to be applied in the order in which they are given in the respective list for that image.\n * 'original_images': A list containing the original images in the batch before any processing.\n * 'original_labels': A list containing the original ground truth boxes for the images in this batch before any\n processing. Only available if ground truth is available.\n The order of the outputs in the tuple is the order of the list above. If `returns` contains a keyword for an\n output that is unavailable, that output omitted in the yielded tuples and a warning will be raised.\n keep_images_without_gt (bool, optional): If `False`, images for which there aren't any ground truth boxes before\n any transformations have been applied will be removed from the batch. If `True`, such images will be kept\n in the batch.\n degenerate_box_handling (str, optional): How to handle degenerate boxes, which are boxes that have `xmax <= xmin` and/or\n `ymax <= ymin`. 
Degenerate boxes can sometimes be in the dataset, or non-degenerate boxes can become degenerate\n after they were processed by transformations. Note that the generator checks for degenerate boxes after all\n transformations have been applied (if any), but before the labels were passed to the `label_encoder` (if one was given).\n Can be one of 'warn' or 'remove'. If 'warn', the generator will merely print a warning to let you know that there\n are degenerate boxes in a batch. If 'remove', the generator will remove degenerate boxes from the batch silently.\n\n Yields:\n The next batch as a tuple of items as defined by the `returns` argument.\n '''\n\n if self.dataset_size == 0:\n raise DatasetError(\"Cannot generate batches because you did not load a dataset.\")\n\n #############################################################################################\n # Warn if any of the set returns aren't possible.\n #############################################################################################\n\n if self.labels is None:\n if any([ret in returns for ret in ['original_labels', 'processed_labels', 'encoded_labels', 'matched_anchors', 'evaluation-neutral']]):\n warnings.warn(\"Since no labels were given, none of 'original_labels', 'processed_labels', 'evaluation-neutral', 'encoded_labels', and 'matched_anchors' \" +\n \"are possible returns, but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif label_encoder is None:\n if any([ret in returns for ret in ['encoded_labels', 'matched_anchors']]):\n warnings.warn(\"Since no label encoder was given, 'encoded_labels' and 'matched_anchors' aren't possible returns, \" +\n \"but you set `returns = {}`. The impossible returns will be `None`.\".format(returns))\n elif not isinstance(label_encoder, SSDInputEncoder):\n if 'matched_anchors' in returns:\n warnings.warn(\"`label_encoder` is not an `SSDInputEncoder` object, therefore 'matched_anchors' is not a possible return, \" +\n \"but you set `returns = {}`. 
The impossible returns will be `None`.\".format(returns))\n\n #############################################################################################\n # Do a few preparatory things like maybe shuffling the dataset initially.\n #############################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n if degenerate_box_handling == 'remove':\n box_filter = BoxFilter(check_overlap=False,\n check_min_area=False,\n check_degenerate=True,\n labels_format=self.labels_format)\n\n # Override the labels formats of all the transformations to make sure they are set correctly.\n if not (self.labels is None):\n for transform in transformations:\n transform.labels_format = self.labels_format\n\n #############################################################################################\n # Generate mini batches.\n #############################################################################################\n\n current = 0\n\n while True:\n\n batch_X, batch_y = [], []\n\n if current >= self.dataset_size:\n current = 0\n\n #########################################################################################\n # Maybe shuffle the dataset if a full pass over the dataset has finished.\n #########################################################################################\n\n if shuffle:\n objects_to_shuffle = [self.dataset_indices]\n if not (self.filenames is None):\n objects_to_shuffle.append(self.filenames)\n if not (self.labels is None):\n objects_to_shuffle.append(self.labels)\n if not (self.image_ids is None):\n objects_to_shuffle.append(self.image_ids)\n if not (self.eval_neutral is None):\n objects_to_shuffle.append(self.eval_neutral)\n shuffled_objects = sklearn.utils.shuffle(*objects_to_shuffle)\n for i in range(len(objects_to_shuffle)):\n objects_to_shuffle[i][:] = shuffled_objects[i]\n\n #########################################################################################\n # Get the images, (maybe) image IDs, (maybe) labels, etc. 
for this batch.\n #########################################################################################\n\n # We prioritize our options in the following order:\n # 1) If we have the images already loaded in memory, get them from there.\n # 2) Else, if we have an HDF5 dataset, get the images from there.\n # 3) Else, if we have neither of the above, we'll have to load the individual image\n # files from disk.\n batch_indices = self.dataset_indices[current:current+batch_size]\n if not (self.images is None):\n for i in batch_indices:\n batch_X.append(self.images[i])\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n elif not (self.hdf5_dataset is None):\n for i in batch_indices:\n batch_X.append(self.hdf5_dataset['images'][i].reshape(self.hdf5_dataset['image_shapes'][i]))\n if not (self.filenames is None):\n batch_filenames = self.filenames[current:current+batch_size]\n else:\n batch_filenames = None\n else:\n batch_filenames = self.filenames[current:current+batch_size]\n for filename in batch_filenames:\n with Image.open(filename) as image:\n batch_X.append(np.array(image, dtype=np.uint8))\n\n # Get the labels for this batch (if there are any).\n if not (self.labels is None):\n batch_y = deepcopy(self.labels[current:current+batch_size])\n else:\n batch_y = None\n\n if not (self.eval_neutral is None):\n batch_eval_neutral = self.eval_neutral[current:current+batch_size]\n else:\n batch_eval_neutral = None\n\n # Get the image IDs for this batch (if there are any).\n if not (self.image_ids is None):\n batch_image_ids = self.image_ids[current:current+batch_size]\n else:\n batch_image_ids = None\n\n if 'original_images' in returns:\n batch_original_images = deepcopy(batch_X) # The original, unaltered images\n if 'original_labels' in returns:\n batch_original_labels = deepcopy(batch_y) # The original, unaltered labels\n\n current += batch_size\n\n #########################################################################################\n # Maybe perform image transformations.\n #########################################################################################\n\n batch_items_to_remove = [] # In case we need to remove any images from the batch, store their indices in this list.\n batch_inverse_transforms = []\n\n for i in range(len(batch_X)):\n\n if not (self.labels is None):\n # Convert the labels for this image to an array (in case they aren't already).\n batch_y[i] = np.array(batch_y[i])\n # If this image has no ground truth boxes, maybe we don't want to keep it in the batch.\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n # Apply any image transformations we may have received.\n if transformations:\n\n inverse_transforms = []\n\n for transform in transformations:\n\n if not (self.labels is None):\n\n if ('inverse_transform' in returns) and ('return_inverter' in inspect.signature(transform).parameters):\n batch_X[i], batch_y[i], inverse_transform = transform(batch_X[i], batch_y[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i], batch_y[i] = transform(batch_X[i], batch_y[i])\n\n if batch_X[i] is None: # In case the transform failed to produce an output image, which is possible for some random transforms.\n batch_items_to_remove.append(i)\n batch_inverse_transforms.append([])\n continue\n\n else:\n\n if ('inverse_transform' in returns) and ('return_inverter' in 
inspect.signature(transform).parameters):\n batch_X[i], inverse_transform = transform(batch_X[i], return_inverter=True)\n inverse_transforms.append(inverse_transform)\n else:\n batch_X[i] = transform(batch_X[i])\n\n batch_inverse_transforms.append(inverse_transforms[::-1])\n\n #########################################################################################\n # Check for degenerate boxes in this batch item.\n #########################################################################################\n\n if not (self.labels is None):\n\n xmin = self.labels_format['xmin']\n ymin = self.labels_format['ymin']\n xmax = self.labels_format['xmax']\n ymax = self.labels_format['ymax']\n\n if np.any(batch_y[i][:,xmax] - batch_y[i][:,xmin] <= 0) or np.any(batch_y[i][:,ymax] - batch_y[i][:,ymin] <= 0):\n if degenerate_box_handling == 'warn':\n warnings.warn(\"Detected degenerate ground truth bounding boxes for batch item {} with bounding boxes {}, \".format(i, batch_y[i]) +\n \"i.e. bounding boxes where xmax <= xmin and/or ymax <= ymin. \" +\n \"This could mean that your dataset contains degenerate ground truth boxes, or that any image transformations you may apply might \" +\n \"result in degenerate ground truth boxes, or that you are parsing the ground truth in the wrong coordinate format.\" +\n \"Degenerate ground truth bounding boxes may lead to NaN errors during the training.\")\n elif degenerate_box_handling == 'remove':\n batch_y[i] = box_filter(batch_y[i])\n if (batch_y[i].size == 0) and not keep_images_without_gt:\n batch_items_to_remove.append(i)\n\n #########################################################################################\n # Remove any items we might not want to keep from the batch.\n #########################################################################################\n\n if batch_items_to_remove:\n for j in sorted(batch_items_to_remove, reverse=True):\n # This isn't efficient, but it hopefully shouldn't need to be done often anyway.\n batch_X.pop(j)\n batch_filenames.pop(j)\n if batch_inverse_transforms: batch_inverse_transforms.pop(j)\n if not (self.labels is None): batch_y.pop(j)\n if not (self.image_ids is None): batch_image_ids.pop(j)\n if not (self.eval_neutral is None): batch_eval_neutral.pop(j)\n if 'original_images' in returns: batch_original_images.pop(j)\n if 'original_labels' in returns and not (self.labels is None): batch_original_labels.pop(j)\n\n #########################################################################################\n\n # CAUTION: Converting `batch_X` into an array will result in an empty batch if the images have varying sizes\n # or varying numbers of channels. At this point, all images must have the same size and the same\n # number of channels.\n batch_X = np.array(batch_X)\n if (batch_X.size == 0):\n raise DegenerateBatchError(\"You produced an empty batch. This might be because the images in the batch vary \" +\n \"in their size and/or number of channels. 
Note that after all transformations \" +\n \"(if any were given) have been applied to all images in the batch, all images \" +\n \"must be homogeneous in size along all axes.\")\n\n #########################################################################################\n # If we have a label encoder, encode our labels.\n #########################################################################################\n\n if not (label_encoder is None or self.labels is None):\n\n if ('matched_anchors' in returns) and isinstance(label_encoder, SSDInputEncoder):\n batch_y_encoded, batch_matched_anchors = label_encoder(batch_y, diagnostics=True)\n else:\n batch_y_encoded = label_encoder(batch_y, diagnostics=False)\n batch_matched_anchors = None\n\n else:\n batch_y_encoded = None\n batch_matched_anchors = None\n\n #########################################################################################\n # Compose the output.\n #########################################################################################\n\n ret = []\n if 'processed_images' in returns: ret.append(batch_X)\n if 'encoded_labels' in returns: ret.append(batch_y_encoded)\n if 'matched_anchors' in returns: ret.append(batch_matched_anchors)\n if 'processed_labels' in returns: ret.append(batch_y)\n if 'filenames' in returns: ret.append(batch_filenames)\n if 'image_ids' in returns: ret.append(batch_image_ids)\n if 'evaluation-neutral' in returns: ret.append(batch_eval_neutral)\n if 'inverse_transform' in returns: ret.append(batch_inverse_transforms)\n if 'original_images' in returns: ret.append(batch_original_images)\n if 'original_labels' in returns: ret.append(batch_original_labels)\n\n yield ret\n\n def save_dataset(self,\n filenames_path='filenames.pkl',\n labels_path=None,\n image_ids_path=None,\n eval_neutral_path=None):\n '''\n Writes the current `filenames`, `labels`, and `image_ids` lists to the specified files.\n This is particularly useful for large datasets with annotations that are\n parsed from XML files, which can take quite a long time. If you'll be using the\n same dataset repeatedly, you don't want to have to parse the XML label\n files every time.\n\n Arguments:\n filenames_path (str): The path under which to save the filenames pickle.\n labels_path (str, optional): The path under which to save the labels pickle.\n image_ids_path (str, optional): The path under which to save the image IDs pickle.\n eval_neutral_path (str, optional): The path under which to save the pickle for\n the evaluation-neutrality annotations.\n '''\n with open(filenames_path, 'wb') as f:\n pickle.dump(self.filenames, f)\n if not labels_path is None:\n with open(labels_path, 'wb') as f:\n pickle.dump(self.labels, f)\n if not image_ids_path is None:\n with open(image_ids_path, 'wb') as f:\n pickle.dump(self.image_ids, f)\n if not eval_neutral_path is None:\n with open(eval_neutral_path, 'wb') as f:\n pickle.dump(self.eval_neutral, f)\n\n def get_dataset(self):\n '''\n Returns:\n 4-tuple containing lists and/or `None` for the filenames, labels, image IDs,\n and evaluation-neutrality annotations.\n '''\n return self.filenames, self.labels, self.image_ids, self.eval_neutral\n\n def get_dataset_size(self):\n '''\n Returns:\n The number of images in the dataset.\n '''\n return self.dataset_size\n"
] | [
[
"numpy.asarray",
"numpy.arange",
"numpy.stack",
"numpy.concatenate",
"numpy.any",
"numpy.random.uniform",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
doaa-altarawy/ml_models_deploy | [
"d39f07887e75aedb0f3530934f0b61afe3fabbac"
] | [
"models/qc_time_estimator/qc_time_estimator/predict.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom qc_time_estimator.config import config\nfrom qc_time_estimator.processing.data_management import load_pipeline\nfrom qc_time_estimator.processing.validation import validate_inputs\nfrom qc_time_estimator.metrics import mape, percentile_rel_90\nfrom qc_time_estimator import __version__ as _version\nimport logging\nfrom typing import Union, List\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef make_prediction(*, input_data: Union[pd.DataFrame, List[dict]]) -> dict:\n \"\"\"Make a prediction using a saved model pipeline.\n Throws exception for invalid input.\n\n Parameters\n ----------\n input_data : DataFram or list of dict\n Array of model prediction inputs.\n\n 1- Required input:\n cpu_clock_speed (in MHz, between 500 and 10,000)\n cpu_launch_year: (between 1990 and current year)\n\n driver: DriverEnum\n method: str\n\n 2- Required one of those two groups:\n\n molecule\n basis_set\n\n # OR\n nelec\n nmo\n\n 3- Optional:\n restricted: bool (default=False)\n nthreads: (default=1)\n\n Other extra fields are ignored and don't cause error.\n\n\n Returns\n -------\n Dict with Lit of Predictions for each input row,\n as well as the model version.\n \"\"\"\n\n pipeline_file_name = f'{config.PIPELINE_SAVE_FILE}{_version}.pkl'\n _qc_time = load_pipeline(file_name=pipeline_file_name)\n\n data = pd.DataFrame(input_data)\n validated_data = validate_inputs(input_data=data)\n\n prediction = _qc_time.predict(validated_data)\n\n results = {'predictions': prediction, 'version': _version}\n\n logger.info(\n f'Making predictions with model version: {_version} \\n'\n f'Original Input data: {data.to_dict(\"records\")} \\n'\n f'Validated Inputs: {validated_data.to_dict(\"records\")} \\n'\n f'Predictions: {results}')\n\n return results\n\ndef get_accuracy(model, X, y):\n \"\"\"Calculate the prediction acuracy (MAPE) and the Percentile for the\n given data using the given model\"\"\"\n\n pred = model.predict(X)\n mape_score = mape(y, pred)\n percentile_99 = percentile_rel_90(y, pred)\n\n return mape_score, percentile_99"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
daanknoors/synthetic_data_generation | [
"5a0d1818cba2bc8b629869773a2f86a156d25fd9"
] | [
"synthesis/evaluation/evaluator.py"
] | [
"\"\"\"\nUtility evaluator. Comparing a reference dataset to 1 or more target datasets.\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nimport synthesis.evaluation.metrics as metrics\nfrom synthesis.evaluation._base import BaseMetric, COLOR_PALETTE\n\n\n\nDEFAULT_METRICS = {\n 'average_js_distance': metrics.MarginalComparison(),\n 'pairwise_correlation_distance': metrics.AssociationsComparison()\n}\n\nclass SyntheticDataEvaluator(BaseMetric):\n \"\"\"Class to compare synthetic data to the original\"\"\"\n def __init__(self, metrics=None):\n \"\"\"Choose which metrics to compute\"\"\"\n self.metrics = metrics\n\n def fit(self, data_original, data_synthetic):\n self._check_input_args()\n data_original, data_synthetic = self._check_input_data(data_original, data_synthetic)\n\n for name, metric in self.metrics.items():\n metric.fit(data_original, data_synthetic)\n return self\n\n def score(self):\n scores = {}\n for name, metric in self.metrics.items():\n scores[name] = metric.score()\n return scores\n\n def plot(self):\n for name, metric in self.metrics.items():\n metric.plot()\n\n def _check_input_args(self):\n if self.metrics is not None:\n for name, metric in self.metrics.items():\n if not isinstance(metric, BaseMetric):\n raise ValueError(\"Input metric {} should subclass synthesis.evaluation._base.BaseMetric\".format(metric))\n else:\n self.metrics = DEFAULT_METRICS\n\n\nclass OriginalDataEvaluator():\n \"\"\"Class to evaluate input dataframe\"\"\"\n def __init__(self, cardinality_threshold=50, rare_category_threshold=0.05):\n self.cardinality_threshold = cardinality_threshold\n self.rare_category_threshold = rare_category_threshold\n\n def fit(self, data):\n self.stats_ = {}\n self.stats_['columns_high_cardinality'] = self.get_high_cardinality_columns(data, self.cardinality_threshold)\n self.stats_['rare_column_categories'] = self.get_rare_column_categories(data, self.rare_category_threshold)\n return self\n\n def plot(self, data, normalize=True):\n column_names = data.columns\n fig, ax = plt.subplots(len(column_names), 1, figsize=(8, len(column_names) * 4))\n\n for idx, col in enumerate(column_names):\n column_value_counts = data.value_counts(normalize=normalize)\n\n bar_position = np.arange(len(column_value_counts.values))\n bar_width = 0.5\n\n ax[idx].bar(x=bar_position, height=column_value_counts.values,\n color=COLOR_PALETTE[0], label='original', width=bar_width)\n\n ax[idx].set_xticks(bar_position + bar_width / 2)\n if len(column_value_counts.values) <= 20:\n ax[idx].set_xticklabels(column_value_counts.keys(), rotation=25)\n else:\n ax[idx].set_xticklabels('')\n\n title = r\"$\\bf{\" + col + \"}$\"\n ax[idx].set_title(title)\n if normalize:\n ax[idx].set_ylabel('Probability')\n else:\n ax[idx].set_ylabel('Count')\n\n ax[idx].legend()\n fig.tight_layout()\n plt.show()\n\n @staticmethod\n def get_high_cardinality_columns(data, threshold):\n \"\"\"Get features with more unique values than the specified threshold.\"\"\"\n return data.columns[data.nunique() > threshold].tolist()\n\n @staticmethod\n def get_rare_column_categories(data, threshold):\n \"\"\"Get rare categories per column\"\"\"\n rare_categories = {}\n for c in data.columns:\n rare_categories[c] = [k for k, v in data[c].value_counts(normalize=True).items() if v < threshold]\n return rare_categories\n\n"
] | [
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Lee-Gihun/Micronet_GSJ | [
"72289bb66507b6c3b4d14f2e5916dec718a1b198"
] | [
"AutoML_autoaug.py"
] | [
"# -*- coding: utf-8 -*-\nimport os\nos.environ['OMP_NUM_THREADS'] = '1'\nimport sys\nimport math\nimport random\nimport shutil\nimport pickle\nimport logging\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.optim.lr_scheduler as lr_scheduler\nimport torchvision.models as models\nimport numpy as np\n\nfrom PIL import Image, ImageEnhance, ImageOps\n\nfrom hyperas import optim as hyperas_optim\nfrom hyperopt import Trials, STATUS_OK, tpe\nfrom hyperas.distributions import choice, uniform\nfrom hyperas.utils import eval_hyperopt_space\n\nfrom data_utils import *\nfrom train_tools import *\nfrom models import *\nfrom counting import *\n\ndef _logging():\n fpath = './results/AutoML/cifar100_autoaug_policy.log'\n logger = logging.getLogger('Autoaugment Policy')\n logger.setLevel(logging.DEBUG)\n if not logger.handlers:\n handler = logging.FileHandler(fpath)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger\n\ndef _get_conf():\n with open('./tmp.pickle', 'rb') as f:\n conf_name = pickle.load(f)\n \n opt = ConfLoader(conf_name).opt\n \n return opt\n \ndef data():\n # it just for processing, meaningless\n dataloader = None\n dataset_size = None\n \n return dataloader, dataset_size\n\ndef create_model(dataloader, dataset_size):\n class SubPolicy():\n def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):\n ranges = {\n \"shearX\": np.linspace(0, 0.3, 10),\n \"shearY\": np.linspace(0, 0.3, 10),\n \"translateX\": np.linspace(0, 150 / 331, 10),\n \"translateY\": np.linspace(0, 150 / 331, 10),\n \"rotate\": np.linspace(0, 30, 10),\n \"color\": np.linspace(0.0, 0.9, 10),\n \"posterize\": np.round(np.linspace(8, 4, 10), 0).astype(np.int),\n \"solarize\": np.linspace(256, 0, 10),\n \"contrast\": np.linspace(0.0, 0.9, 10),\n \"sharpness\": np.linspace(0.0, 0.9, 10),\n \"brightness\": np.linspace(0.0, 0.9, 10),\n \"autocontrast\": [0] * 10,\n \"equalize\": [0] * 10,\n \"invert\": [0] * 10\n }\n\n # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand\n def rotate_with_fill(img, magnitude):\n rot = img.convert(\"RGBA\").rotate(magnitude)\n return Image.composite(rot, Image.new(\"RGBA\", rot.size, (128,) * 4), rot).convert(img.mode)\n\n func = {\n \"shearX\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),\n Image.BICUBIC, fillcolor=fillcolor),\n \"shearY\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),\n Image.BICUBIC, fillcolor=fillcolor),\n \"translateX\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),\n fillcolor=fillcolor),\n \"translateY\": lambda img, magnitude: img.transform(\n img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),\n fillcolor=fillcolor),\n \"rotate\": lambda img, magnitude: rotate_with_fill(img, magnitude),\n # \"rotate\": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),\n \"color\": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),\n \"posterize\": lambda img, magnitude: ImageOps.posterize(img, magnitude),\n \"solarize\": lambda img, magnitude: 
ImageOps.solarize(img, magnitude),\n \"contrast\": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(\n 1 + magnitude * random.choice([-1, 1])),\n \"sharpness\": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(\n 1 + magnitude * random.choice([-1, 1])),\n \"brightness\": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(\n 1 + magnitude * random.choice([-1, 1])),\n \"autocontrast\": lambda img, magnitude: ImageOps.autocontrast(img),\n \"equalize\": lambda img, magnitude: ImageOps.equalize(img),\n \"invert\": lambda img, magnitude: ImageOps.invert(img)\n }\n\n # self.name = \"{}_{:.2f}_and_{}_{:.2f}\".format(\n # operation1, ranges[operation1][magnitude_idx1],\n # operation2, ranges[operation2][magnitude_idx2])\n self.p1 = p1\n self.operation1 = func[operation1]\n self.magnitude1 = ranges[operation1][magnitude_idx1]\n self.p2 = p2\n self.operation2 = func[operation2]\n self.magnitude2 = ranges[operation2][magnitude_idx2]\n\n\n def __call__(self, img):\n if random.random() < self.p1: img = self.operation1(img, self.magnitude1)\n if random.random() < self.p2: img = self.operation2(img, self.magnitude2)\n return img\n\n class Autoaug():\n def __init__(self, fillcolor=(128, 128, 128)):\n self.policies = [\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n 
SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", 
\"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", 
\"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 
7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor),\n SubPolicy({{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, {{uniform(0, 1.0)}}, {{choice([\"shearX\", \"shearY\", \"translateX\", \"translateY\", \"rotate\", \"color\", \"posterize\", \"solarize\", \"contrast\", \"sharpness\", \"brightness\", \"autocontrast\", \"equalize\", \"invert\"])}}, {{choice([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])}}, fillcolor)\n ]\n \n def __call__(self, img):\n policy_idx = random.randint(0, len(self.policies) - 1)\n return self.policies[policy_idx](img)\n\n def __repr__(self):\n return 'AutoAugment CIFAR100 Policy'\n\n opt = _get_conf()\n logger = _logging()\n if os.path.isdir(opt.data.root):\n shutil.rmtree(opt.data.root)\n \n DATASETTER = {'cifar10': cifar_10_setter,\n 'cifar100': cifar_100_setter}\n \n CRITERION = {'mse': nn.MSELoss,\n 'cross_entropy': nn.CrossEntropyLoss,\n 'label_smoothing': LabelSmoothingLoss}\n\n OPTIMIZER = {'sgd': optim.SGD,\n 'adam': optim.Adam,\n 'adagrad': optim.Adagrad,\n 'rmsprop': optim.RMSprop,\n 'radam': RAdam}\n\n dataloaders, dataset_sizes = DATASETTER[opt.data.dataset](batch_size=opt.data.batch_size, \n valid_size=opt.data.valid_size,\n root=opt.data.root,\n fixed_valid=opt.data.fixed_valid,\n autoaugment=opt.data.autoaugment,\n aug_policy=Autoaug())\n \n avail_resource = opt.model.param.avail_resource\n resolution_coefficient = opt.model.param.resolution_coefficient\n resolution_coefficient = round(math.pow(resolution_coefficient, avail_resource), 2)\n\n blocks_args, global_params = efficientnet(blocks_args='default',\n activation=opt.model.param.activation,\n activation_param=opt.model.param.get('activation_param', {}),\n resolution_coefficient=resolution_coefficient,\n width_coefficient=opt.model.param.width_coefficient, \n depth_coefficient=opt.model.param.depth_coefficient, \n image_size=opt.model.param.image_size, \n num_classes=opt.model.param.num_classes)\n \n #meaningless = {{choice(['No', 'meaning'])}}\n model = EfficientNet(blocks_args, \n global_params)\n \n model.to(opt.trainhandler.device)\n \n criterion = CRITERION[opt.criterion.algo](**opt.criterion.param) if opt.criterion.get('param') else CRITERION[opt.criterion.algo]() \n\n optimizer = OPTIMIZER[opt.optimizer.algo](model.parameters(), **opt.optimizer.param) if opt.optimizer.get('param') else OPTIMIZER[opt.optimizer.algo](model.parameters())\n \n # if not use scheduler, you can skip in config json file\n if opt.scheduler.get('enabled', False):\n scheduler_type = 
lr_scheduler.MultiStepLR if opt.scheduler.type == 'multistep' else lr_scheduler.CosineAnnealingLR if opt.scheduler.type == 'cosine' else lr_scheduler.StepLR\n scheduler = scheduler_type(optimizer, **opt.scheduler.param)\n else:\n scheduler = None\n \n train_handler = TrainHandler(model, \n dataloaders, \n dataset_sizes, \n criterion, \n optimizer, \n scheduler, \n device=opt.trainhandler.device, \n path=opt.trainhandler.path,\n mixup=opt.trainhandler.mixup.enabled,\n alpha=opt.trainhandler.mixup.alpha,\n precision=opt.trainhandler.precision)\n \n train_handler.set_name(opt.trainhandler.name)\n \n train_losses, valid_losses, train_accs, valid_accs = train_handler.train_model(num_epochs=opt.trainhandler.train.num_epochs)\n \n _, valid_loss = sorted(valid_losses, key = lambda x: x[1])[0]\n _, valid_acc = sorted(valid_accs, key = lambda x: x[1], reverse=True)[0]\n \n logger.info('Validation accuracy : %.2f' % (valid_acc * 100))\n \n return {'loss': valid_loss, 'status': STATUS_OK, 'model': train_handler.model}\n \nif __name__ == '__main__':\n conf_name = sys.argv[1]\n with open('./tmp.pickle', 'wb') as f:\n pickle.dump(conf_name, f)\n \n fpath = './results/AutoML'\n if not os.path.isdir(fpath):\n os.makedirs(fpath)\n if os.path.isfile('./results/AutoML/cifar100_autoaug_policy.log'):\n os.remove('./results/AutoML/cifar100_autoaug_policy.log')\n \n opt = ConfLoader(conf_name).opt\n logger = _logging()\n \n DATASETTER = {'cifar10': cifar_10_setter,\n 'cifar100': cifar_100_setter}\n \n CRITERION = {'mse': nn.MSELoss,\n 'cross_entropy': nn.CrossEntropyLoss,\n 'label_smoothing': LabelSmoothingLoss}\n\n OPTIMIZER = {'sgd': optim.SGD,\n 'adam': optim.Adam,\n 'adagrad': optim.Adagrad,\n 'rmsprop': optim.RMSprop,\n 'radam': RAdam}\n \n trials = Trials()\n best_run, best_model, space = hyperas_optim.minimize(model=create_model,\n data=data,\n algo=tpe.suggest,\n functions=[_get_conf, _logging],\n max_evals=1,\n trials=trials,\n eval_space=True,\n return_space=True)\n \n logger.info('=' * 30)\n logger.info('Best performing model chosen hyper-parameters: %s' % best_run)\n logger.info('=' * 30)\n \n for t, trial in enumerate(trials):\n vals = trial.get('misc').get('vals')\n tmp = {}\n for k,v in list(vals.items()):\n tmp[k] = v[0]\n logger.info('Trial %d : %s' % (t, eval_hyperopt_space(space, tmp)))\n logger.info('=' * 30)\n \n os.remove('./tmp.pickle')"
] | [
[
"numpy.linspace"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aliwimo/uav_placement | [
"85cde62361dd2bbfd907033b6954998b3461b1ee"
] | [
"firefly/config_file.py"
] | [
"import numpy as np\r\n\r\nPOP_SIZE = 50\r\nMAX_GEN = 1\r\nDIM_SIZE = 3\r\nALPHA = 1.0\r\nBETA0 = 0.5\r\nGAMMA = 1.0\r\nBOUND = 1000\r\nUB = BOUND\r\nLB = -BOUND\r\nBUILDING = [20, 50, 200] #b1\r\n# BUILDING = [20, 50, 250] #b2\r\n# BUILDING = [20, 50, 300] #b3\r\n# BUILDING = [10, 50, 250] #b4\r\n# BUILDING = [30, 50, 250] #b5\r\n# BUILDING = [50, 50, 250] #b6\r\n\r\n\r\nLocation_Array = [0] * DIM_SIZE\r\nFirefly_List = [0] * POP_SIZE\r\nO_Firefly_List = [0] * POP_SIZE\r\nFitnesses = [0] * POP_SIZE\r\nBest = []\r\nUsers_Locations = np.loadtxt( 'users/UserLocations_20_50_200.dat' ) #u1\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_20_50_250.dat' ) #u2\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_20_50_300.dat' ) #u3\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_10_50_250.dat' ) #u4\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_30_50_250.dat' ) #u5\r\n# Users_Locations = np.loadtxt( 'users/UserLocations_50_50_250.dat' ) #u6\r\n\r\n"
] | [
[
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Megscammell/METOD-Algorithm | [
"7518145ec100599bddc880f5f52d28f9a3959108",
"7518145ec100599bddc880f5f52d28f9a3959108"
] | [
"src/metod_alg/objective_functions/calc_minimizer_sog.py",
"Numerical_Experiments/Plot functions/fixed_step_length_example.py"
] | [
"import numpy as np\nfrom numpy import linalg as LA\n\n\ndef calc_minimizer_sog(point, p, sigma_sq, store_x0, matrix_test, store_c):\n \"\"\"\n Finds the nearest local minimizer for point using the Sum of Gaussians\n function.\n\n Parameters\n ----------\n point : 1-D array with shape (d, )\n A point used to evaluate the function.\n p : integer\n Number of local minima.\n sigma_sq: float or integer\n Value of sigma squared.\n store_x0 : 2-D arrays with shape (p, d).\n matrix_test : 3-D arrays with shape (p, d, d).\n store_c : 3-D arrays with shape (p, ).\n\n Returns\n -------\n np.argmin(dist) : integer\n Position of the local minimizer which produces the\n smallest distance between point and all p local\n minimizers.\n \"\"\"\n dist = np.zeros((p))\n for i in range(p):\n dist[i] = LA.norm(point - store_x0[i])\n assert(np.min(dist) < 0.25)\n return np.argmin(dist)\n",
"import numpy as np\r\nimport sys\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom metod_alg import objective_functions as mt_obj\r\n\r\n\r\ndef compute_its(x, d, g, args, step_size):\r\n \"\"\"\r\n Function to compute iterations of descent where the step length is a\r\n fixed positive constant.\r\n\r\n Parameters\r\n ----------\r\n x : 1-D array with shape (d, )\r\n Apply descent iterations to point.\r\n d : integer\r\n Size of dimension.\r\n g : gradient of objective function.\r\n\r\n `g(x, *args) -> 1-D array with shape (d, )`\r\n\r\n args : tuple\r\n Arguments passed to the gradient g.\r\n step_size : float\r\n Positive constant used as the step size to compute iterations\r\n of descent.\r\n\r\n Returns\r\n -------\r\n sd_iterations : 2-D array\r\n Each iteration of descent is stored in each row of\r\n sd_iterations.\r\n \"\"\"\r\n sd_iterations = np.zeros((1, d))\r\n sd_iterations[0, :] = x.reshape(1, d)\r\n while np.linalg.norm(g(x, *args)) > 0.1:\r\n x = x - step_size * g(x, *args)\r\n sd_iterations = np.vstack([sd_iterations, x.reshape\r\n ((1, d))])\r\n return sd_iterations\r\n\r\n\r\ndef illustrate_importance_of_step(seed, test_num, step_type):\r\n \"\"\"\r\n Generate contour plot for minimum of several quadratic forms function,\r\n along with plot of descent iterations from a starting point (0.5, 0.55),\r\n with fixed step length.\r\n\r\n Parameters\r\n ----------\r\n seed : integer\r\n Seed used to initialize the pseudo random number generator.\r\n test_num : integer\r\n Number of points to evaluate function to compute contour plot.\r\n step_type : string\r\n Either select step_type = 'long' or step_type = 'short'. If\r\n step_type = 'long', then step_size = 0.6. Otherwise, if\r\n step_type = 'short', then step_size = 0.1.\r\n \"\"\"\r\n np.random.seed(seed)\r\n d = 2\r\n P = 4\r\n lambda_1 = 1\r\n lambda_2 = 5\r\n\r\n f = mt_obj.several_quad_function\r\n g = mt_obj.several_quad_gradient\r\n store_x0, matrix_combined = (mt_obj.function_parameters_several_quad\r\n (P, d, lambda_1, lambda_2))\r\n store_x0 = np.array([[0.96, 0.09],\r\n [0.86, 0.9],\r\n [0.2, 0.98],\r\n [0.12, 0.22]])\r\n args = P, store_x0, matrix_combined\r\n\r\n x = np.linspace(0, 1.2, test_num)\r\n y = np.linspace(0, 1.2, test_num)\r\n Z = np.zeros((test_num, test_num))\r\n X, Y = np.meshgrid(x, y)\r\n for i in range(test_num):\r\n for j in range(test_num):\r\n x1_var = X[i, j]\r\n x2_var = Y[i, j]\r\n Z[i, j] = f(np.array([x1_var, x2_var]).reshape(2, ), *args)\r\n\r\n x = np.array([0.5, 0.55])\r\n if step_type == 'long':\r\n step_size = 0.6\r\n elif step_type == 'short':\r\n step_size = 0.1\r\n descended_x_points = compute_its(x, d, g, args, step_size)\r\n\r\n chosen_x1 = descended_x_points[0:descended_x_points.shape[0]][:, 0]\r\n chosen_x2 = descended_x_points[0:descended_x_points.shape[0]][:, 1]\r\n\r\n if step_type == 'long':\r\n plt.scatter(chosen_x1[0], chosen_x2[0], s=80, color='green',\r\n marker='o')\r\n plt.scatter(chosen_x1[1:4], chosen_x2[1:4], s=20, color='blue')\r\n plt.plot(chosen_x1[:5], chosen_x2[:5], 'blue')\r\n plt.gca().set_xlim(left=0, right=1.2)\r\n plt.gca().set_ylim(bottom=0)\r\n plt.contour(X, Y, Z, 50, cmap='RdGy', alpha=0.5)\r\n elif step_type == 'short':\r\n plt.scatter(chosen_x1[0], chosen_x2[0], s=80, color='green',\r\n marker='o')\r\n plt.scatter(chosen_x1[1:], chosen_x2[1:], s=20, color='blue')\r\n plt.plot(chosen_x1, chosen_x2, 'blue')\r\n plt.gca().set_xlim(left=0, right=1)\r\n plt.gca().set_ylim(bottom=0)\r\n plt.contour(X, Y, Z, 50, cmap='RdGy', alpha=0.5)\r\n 
plt.savefig('fixed_step_size_d=2_rs_%s_%s.png' % (seed, step_type))\r\n\r\n\r\nif __name__ == \"__main__\":\r\n step_type = str(sys.argv[1])\r\n test_num = 100\r\n seed = 5\r\n illustrate_importance_of_step(seed, test_num, step_type)\r\n"
] | [
[
"numpy.min",
"numpy.zeros",
"numpy.argmin",
"numpy.linalg.norm"
],
[
"matplotlib.pyplot.gca",
"numpy.random.seed",
"numpy.linspace",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.contour",
"numpy.array",
"numpy.meshgrid",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Giannos-G/scikit-learn_modified | [
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda",
"03df71bbea1bcb3423262b711191552420422cda"
] | [
"examples/applications/plot_out_of_core_classification.py",
"benchmarks/lasso_replicas/bench_plot_lasso_path_83.py",
"examples/kernel_approximation/plot_scalable_poly_kernels.py",
"benchmarks/lasso_replicas/bench_plot_lasso_path_81.py",
"examples/miscellaneous/plot_kernel_approximation.py",
"benchmarks/bench_tree_n1_3_n2_4.py",
"benchmarks/bench_plot_lasso_path.py",
"benchmarks/lasso_replicas/bench_plot_lasso_path_42.py",
"examples/linear_model/plot_iris_logistic.py",
"benchmarks/bench_tree_n1_6_n2_5.py",
"benchmarks/bench_tree_n1_3_n2_8.py",
"benchmarks/lasso_replicas/bench_plot_lasso_path_25.py",
"benchmarks/bench_tree_n1_4_n2_8.py",
"examples/semi_supervised/plot_self_training_varying_threshold.py",
"benchmarks/bench_tree_n1_2_n2_9.py",
"examples/linear_model/plot_ransac.py",
"benchmarks/bench_tree_n1_1_n2_10.py",
"benchmarks/lasso_replicas/bench_plot_lasso_path_68.py",
"examples/linear_model/plot_bayesian_ridge_curvefit.py"
] | [
"\"\"\"\n======================================================\nOut-of-core classification of text documents\n======================================================\n\nThis is an example showing how scikit-learn can be used for classification\nusing an out-of-core approach: learning from data that doesn't fit into main\nmemory. We make use of an online classifier, i.e., one that supports the\npartial_fit method, that will be fed with batches of examples. To guarantee\nthat the features space remains the same over time we leverage a\nHashingVectorizer that will project each example into the same feature space.\nThis is especially useful in the case of text classification where new\nfeatures (words) may appear in each batch.\n\"\"\"\n\n# Authors: Eustache Diemert <[email protected]>\n# @FedericoV <https://github.com/FedericoV/>\n# License: BSD 3 clause\n\nfrom glob import glob\nimport itertools\nimport os.path\nimport re\nimport tarfile\nimport time\nimport sys\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\nfrom html.parser import HTMLParser\nfrom urllib.request import urlretrieve\nfrom sklearn.datasets import get_data_home\nfrom sklearn.feature_extraction.text import HashingVectorizer\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.naive_bayes import MultinomialNB\n\n\ndef _not_in_sphinx():\n # Hack to detect whether we are running by the sphinx builder\n return '__file__' in globals()\n\n# %%\n# Reuters Dataset related routines\n# --------------------------------\n#\n# The dataset used in this example is Reuters-21578 as provided by the UCI ML\n# repository. It will be automatically downloaded and uncompressed on first\n# run.\n\n\n\nclass ReutersParser(HTMLParser):\n \"\"\"Utility class to parse a SGML file and yield documents one at a time.\"\"\"\n\n def __init__(self, encoding='latin-1'):\n HTMLParser.__init__(self)\n self._reset()\n self.encoding = encoding\n\n def handle_starttag(self, tag, attrs):\n method = 'start_' + tag\n getattr(self, method, lambda x: None)(attrs)\n\n def handle_endtag(self, tag):\n method = 'end_' + tag\n getattr(self, method, lambda: None)()\n\n def _reset(self):\n self.in_title = 0\n self.in_body = 0\n self.in_topics = 0\n self.in_topic_d = 0\n self.title = \"\"\n self.body = \"\"\n self.topics = []\n self.topic_d = \"\"\n\n def parse(self, fd):\n self.docs = []\n for chunk in fd:\n self.feed(chunk.decode(self.encoding))\n for doc in self.docs:\n yield doc\n self.docs = []\n self.close()\n\n def handle_data(self, data):\n if self.in_body:\n self.body += data\n elif self.in_title:\n self.title += data\n elif self.in_topic_d:\n self.topic_d += data\n\n def start_reuters(self, attributes):\n pass\n\n def end_reuters(self):\n self.body = re.sub(r'\\s+', r' ', self.body)\n self.docs.append({'title': self.title,\n 'body': self.body,\n 'topics': self.topics})\n self._reset()\n\n def start_title(self, attributes):\n self.in_title = 1\n\n def end_title(self):\n self.in_title = 0\n\n def start_body(self, attributes):\n self.in_body = 1\n\n def end_body(self):\n self.in_body = 0\n\n def start_topics(self, attributes):\n self.in_topics = 1\n\n def end_topics(self):\n self.in_topics = 0\n\n def start_d(self, attributes):\n self.in_topic_d = 1\n\n def end_d(self):\n self.in_topic_d = 0\n self.topics.append(self.topic_d)\n self.topic_d = \"\"\n\n\ndef stream_reuters_documents(data_path=None):\n \"\"\"Iterate 
over documents of the Reuters dataset.\n\n The Reuters archive will automatically be downloaded and uncompressed if\n the `data_path` directory does not exist.\n\n Documents are represented as dictionaries with 'body' (str),\n 'title' (str), 'topics' (list(str)) keys.\n\n \"\"\"\n\n DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'\n 'reuters21578-mld/reuters21578.tar.gz')\n ARCHIVE_FILENAME = 'reuters21578.tar.gz'\n\n if data_path is None:\n data_path = os.path.join(get_data_home(), \"reuters\")\n if not os.path.exists(data_path):\n \"\"\"Download the dataset.\"\"\"\n print(\"downloading dataset (once and for all) into %s\" %\n data_path)\n os.mkdir(data_path)\n\n def progress(blocknum, bs, size):\n total_sz_mb = '%.2f MB' % (size / 1e6)\n current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)\n if _not_in_sphinx():\n sys.stdout.write(\n '\\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb))\n\n archive_path = os.path.join(data_path, ARCHIVE_FILENAME)\n urlretrieve(DOWNLOAD_URL, filename=archive_path,\n reporthook=progress)\n if _not_in_sphinx():\n sys.stdout.write('\\r')\n print(\"untarring Reuters dataset...\")\n tarfile.open(archive_path, 'r:gz').extractall(data_path)\n print(\"done.\")\n\n parser = ReutersParser()\n for filename in glob(os.path.join(data_path, \"*.sgm\")):\n for doc in parser.parse(open(filename, 'rb')):\n yield doc\n\n\n# %%\n# Main\n# ----\n#\n# Create the vectorizer and limit the number of features to a reasonable\n# maximum\n\nvectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,\n alternate_sign=False)\n\n\n# Iterator over parsed Reuters SGML files.\ndata_stream = stream_reuters_documents()\n\n# We learn a binary classification between the \"acq\" class and all the others.\n# \"acq\" was chosen as it is more or less evenly distributed in the Reuters\n# files. 
For other datasets, one should take care of creating a test set with\n# a realistic portion of positive instances.\nall_classes = np.array([0, 1])\npositive_class = 'acq'\n\n# Here are some classifiers that support the `partial_fit` method\npartial_fit_classifiers = {\n 'SGD': SGDClassifier(max_iter=5),\n 'Perceptron': Perceptron(),\n 'NB Multinomial': MultinomialNB(alpha=0.01),\n 'Passive-Aggressive': PassiveAggressiveClassifier(),\n}\n\n\ndef get_minibatch(doc_iter, size, pos_class=positive_class):\n \"\"\"Extract a minibatch of examples, return a tuple X_text, y.\n\n Note: size is before excluding invalid docs with no topics assigned.\n\n \"\"\"\n data = [('{title}\\n\\n{body}'.format(**doc), pos_class in doc['topics'])\n for doc in itertools.islice(doc_iter, size)\n if doc['topics']]\n if not len(data):\n return np.asarray([], dtype=int), np.asarray([], dtype=int)\n X_text, y = zip(*data)\n return X_text, np.asarray(y, dtype=int)\n\n\ndef iter_minibatches(doc_iter, minibatch_size):\n \"\"\"Generator of minibatches.\"\"\"\n X_text, y = get_minibatch(doc_iter, minibatch_size)\n while len(X_text):\n yield X_text, y\n X_text, y = get_minibatch(doc_iter, minibatch_size)\n\n\n# test data statistics\ntest_stats = {'n_test': 0, 'n_test_pos': 0}\n\n# First we hold out a number of examples to estimate accuracy\nn_test_documents = 1000\ntick = time.time()\nX_test_text, y_test = get_minibatch(data_stream, 1000)\nparsing_time = time.time() - tick\ntick = time.time()\nX_test = vectorizer.transform(X_test_text)\nvectorizing_time = time.time() - tick\ntest_stats['n_test'] += len(y_test)\ntest_stats['n_test_pos'] += sum(y_test)\nprint(\"Test set is %d documents (%d positive)\" % (len(y_test), sum(y_test)))\n\n\ndef progress(cls_name, stats):\n \"\"\"Report progress information, return a string.\"\"\"\n duration = time.time() - stats['t0']\n s = \"%20s classifier : \\t\" % cls_name\n s += \"%(n_train)6d train docs (%(n_train_pos)6d positive) \" % stats\n s += \"%(n_test)6d test docs (%(n_test_pos)6d positive) \" % test_stats\n s += \"accuracy: %(accuracy).3f \" % stats\n s += \"in %.2fs (%5d docs/s)\" % (duration, stats['n_train'] / duration)\n return s\n\n\ncls_stats = {}\n\nfor cls_name in partial_fit_classifiers:\n stats = {'n_train': 0, 'n_train_pos': 0,\n 'accuracy': 0.0, 'accuracy_history': [(0, 0)], 't0': time.time(),\n 'runtime_history': [(0, 0)], 'total_fit_time': 0.0}\n cls_stats[cls_name] = stats\n\nget_minibatch(data_stream, n_test_documents)\n# Discard test set\n\n# We will feed the classifier with mini-batches of 1000 documents; this means\n# we have at most 1000 docs in memory at any time. 
The smaller the document\n# batch, the bigger the relative overhead of the partial fit methods.\nminibatch_size = 1000\n\n# Create the data_stream that parses Reuters SGML files and iterates on\n# documents as a stream.\nminibatch_iterators = iter_minibatches(data_stream, minibatch_size)\ntotal_vect_time = 0.0\n\n# Main loop: iterate on mini-batches of examples\nfor i, (X_train_text, y_train) in enumerate(minibatch_iterators):\n\n tick = time.time()\n X_train = vectorizer.transform(X_train_text)\n total_vect_time += time.time() - tick\n\n for cls_name, cls in partial_fit_classifiers.items():\n tick = time.time()\n # update estimator with examples in the current mini-batch\n cls.partial_fit(X_train, y_train, classes=all_classes)\n\n # accumulate test accuracy stats\n cls_stats[cls_name]['total_fit_time'] += time.time() - tick\n cls_stats[cls_name]['n_train'] += X_train.shape[0]\n cls_stats[cls_name]['n_train_pos'] += sum(y_train)\n tick = time.time()\n cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)\n cls_stats[cls_name]['prediction_time'] = time.time() - tick\n acc_history = (cls_stats[cls_name]['accuracy'],\n cls_stats[cls_name]['n_train'])\n cls_stats[cls_name]['accuracy_history'].append(acc_history)\n run_history = (cls_stats[cls_name]['accuracy'],\n total_vect_time + cls_stats[cls_name]['total_fit_time'])\n cls_stats[cls_name]['runtime_history'].append(run_history)\n\n if i % 3 == 0:\n print(progress(cls_name, cls_stats[cls_name]))\n if i % 3 == 0:\n print('\\n')\n\n\n# %%\n# Plot results\n# ------------\n#\n# The plot represents the learning curve of the classifier: the evolution\n# of classification accuracy over the course of the mini-batches. Accuracy is\n# measured on the first 1000 samples, held out as a validation set.\n#\n# To limit the memory consumption, we queue examples up to a fixed amount\n# before feeding them to the learner.\n\n\ndef plot_accuracy(x, y, x_legend):\n \"\"\"Plot accuracy as a function of x.\"\"\"\n x = np.array(x)\n y = np.array(y)\n plt.title('Classification accuracy as a function of %s' % x_legend)\n plt.xlabel('%s' % x_legend)\n plt.ylabel('Accuracy')\n plt.grid(True)\n plt.plot(x, y)\n\n\nrcParams['legend.fontsize'] = 10\ncls_names = list(sorted(cls_stats.keys()))\n\n# Plot accuracy evolution\nplt.figure()\nfor _, stats in sorted(cls_stats.items()):\n # Plot accuracy evolution with #examples\n accuracy, n_examples = zip(*stats['accuracy_history'])\n plot_accuracy(n_examples, accuracy, \"training examples (#)\")\n ax = plt.gca()\n ax.set_ylim((0.8, 1))\nplt.legend(cls_names, loc='best')\n\nplt.figure()\nfor _, stats in sorted(cls_stats.items()):\n # Plot accuracy evolution with runtime\n accuracy, runtime = zip(*stats['runtime_history'])\n plot_accuracy(runtime, accuracy, 'runtime (s)')\n ax = plt.gca()\n ax.set_ylim((0.8, 1))\nplt.legend(cls_names, loc='best')\n\n# Plot fitting times\nplt.figure()\nfig = plt.gcf()\ncls_runtime = [stats['total_fit_time']\n for cls_name, stats in sorted(cls_stats.items())]\n\ncls_runtime.append(total_vect_time)\ncls_names.append('Vectorization')\nbar_colors = ['b', 'g', 'r', 'c', 'm', 'y']\n\nax = plt.subplot(111)\nrectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,\n color=bar_colors)\n\nax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))\nax.set_xticklabels(cls_names, fontsize=10)\nymax = max(cls_runtime) * 1.2\nax.set_ylim((0, ymax))\nax.set_ylabel('runtime (s)')\nax.set_title('Training Times')\n\n\ndef autolabel(rectangles):\n \"\"\"attach some text via autolabel on 
rectangles.\"\"\"\n for rect in rectangles:\n height = rect.get_height()\n ax.text(rect.get_x() + rect.get_width() / 2.,\n 1.05 * height, '%.4f' % height,\n ha='center', va='bottom')\n plt.setp(plt.xticks()[1], rotation=30)\n\n\nautolabel(rectangles)\nplt.tight_layout()\n#plt.show()\n\n# Plot prediction times\nplt.figure()\ncls_runtime = []\ncls_names = list(sorted(cls_stats.keys()))\nfor cls_name, stats in sorted(cls_stats.items()):\n cls_runtime.append(stats['prediction_time'])\ncls_runtime.append(parsing_time)\ncls_names.append('Read/Parse\\n+Feat.Extr.')\ncls_runtime.append(vectorizing_time)\ncls_names.append('Hashing\\n+Vect.')\n\nax = plt.subplot(111)\nrectangles = plt.bar(range(len(cls_names)), cls_runtime, width=0.5,\n color=bar_colors)\n\nax.set_xticks(np.linspace(0, len(cls_names) - 1, len(cls_names)))\nax.set_xticklabels(cls_names, fontsize=8)\nplt.setp(plt.xticks()[1], rotation=30)\nymax = max(cls_runtime) * 1.2\nax.set_ylim((0, ymax))\nax.set_ylabel('runtime (s)')\nax.set_title('Prediction Times (%d instances)' % n_test_documents)\nautolabel(rectangles)\nplt.tight_layout()\n#plt.show()",
"\"\"\"Benchmarks of Lasso regularization path computation using Lars and CD\n\nThe input data is mostly low rank but is a fat infinite tail.\n\"\"\"\nfrom collections import defaultdict\nimport gc\nimport sys\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.linear_model import lars_path, lars_path_gram\nfrom sklearn.linear_model import lasso_path\nfrom sklearn.datasets import make_regression\n\n\ndef compute_bench(samples_range, features_range):\n\n it = 0\n\n results = defaultdict(lambda: [])\n\n max_it = len(samples_range) * len(features_range)\n for n_samples in samples_range:\n for n_features in features_range:\n it += 1\n print('====================')\n print('Iteration %03d of %03d' % (it, max_it))\n print('====================')\n dataset_kwargs = {\n 'n_samples': n_samples,\n 'n_features': n_features,\n 'n_informative': n_features // 10,\n 'effective_rank': min(n_samples, n_features) / 10,\n #'effective_rank': None,\n 'bias': 0.0,\n }\n print(\"n_samples: %d\" % n_samples)\n print(\"n_features: %d\" % n_features)\n X, y = make_regression(**dataset_kwargs)\n\n gc.collect()\n print(\"benchmarking lars_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n G = np.dot(X.T, X) # precomputed Gram matrix\n Xy = np.dot(X.T, y)\n lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lars_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lars_path(X, y, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (without Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=True)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=False)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (without Gram)'].append(delta)\n\n return results\n\n\nif __name__ == '__main__':\n from mpl_toolkits.mplot3d import axes3d # register the 3d projection\n import matplotlib.pyplot as plt\n\n samples_range = np.linspace(10, 500, 3).astype(int) \n features_range = np.linspace(10, 1400 , 3).astype(int) \n results = compute_bench(samples_range, features_range)\n\n max_time = max(max(t) for t in results.values())\n\n fig = plt.figure('scikit-learn Lasso path benchmark results')\n i = 1\n for c, (label, timings) in zip('bcry', sorted(results.items())):\n ax = fig.add_subplot(2, 2, i, projection='3d')\n X, Y = np.meshgrid(samples_range, features_range)\n Z = np.asarray(timings).reshape(samples_range.shape[0],\n features_range.shape[0])\n\n # plot the actual surface\n ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)\n\n # dummy point plot to stick the legend to since surface plot do not\n # support legends (yet?)\n # ax.plot([1], [1], [1], color=c, label=label)\n\n ax.set_xlabel('n_samples')\n ax.set_ylabel('n_features')\n ax.set_zlabel('Time (s)')\n ax.set_zlim3d(0.0, max_time * 1.1)\n ax.set_title(label)\n # ax.legend()\n i += 1\n #plt.show()\n",
"\"\"\"\n=======================================================\nScalable learning with polynomial kernel aproximation\n=======================================================\n\nThis example illustrates the use of :class:`PolynomialCountSketch` to\nefficiently generate polynomial kernel feature-space approximations.\nThis is used to train linear classifiers that approximate the accuracy\nof kernelized ones.\n\n.. currentmodule:: sklearn.kernel_approximation\n\nWe use the Covtype dataset [2], trying to reproduce the experiments on the\noriginal paper of Tensor Sketch [1], i.e. the algorithm implemented by\n:class:`PolynomialCountSketch`.\n\nFirst, we compute the accuracy of a linear classifier on the original\nfeatures. Then, we train linear classifiers on different numbers of\nfeatures (`n_components`) generated by :class:`PolynomialCountSketch`,\napproximating the accuracy of a kernelized classifier in a scalable manner.\n\"\"\"\nprint(__doc__)\n\n# Author: Daniel Lopez-Sanchez <[email protected]>\n# License: BSD 3 clause\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets import fetch_covtype\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler, Normalizer\nfrom sklearn.svm import LinearSVC\nfrom sklearn.kernel_approximation import PolynomialCountSketch\nfrom sklearn.pipeline import Pipeline, make_pipeline\nimport time\n\n# %%\n# Load the Covtype dataset, which contains 581,012 samples\n# with 54 features each, distributed among 6 classes. The goal of this dataset\n# is to predict forest cover type from cartographic variables only\n# (no remotely sensed data). After loading, we transform it into a binary\n# classification problem to match the version of the dataset in the\n# LIBSVM webpage [2], which was the one used in [1].\n\nX, y = fetch_covtype(return_X_y=True)\n\ny[y != 2] = 0\ny[y == 2] = 1 # We will try to separate class 2 from the other 6 classes.\n\n# %%\n# Here we select 5,000 samples for training and 10,000 for testing.\n# To actually reproduce the results in the original Tensor Sketch paper,\n# select 100,000 for training.\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, train_size=5_000,\n test_size=10_000,\n random_state=42)\n\n# %%\n# Now scale features to the range [0, 1] to match the format of the dataset in\n# the LIBSVM webpage, and then normalize to unit length as done in the\n# original Tensor Sketch paper [1].\n\nmm = make_pipeline(MinMaxScaler(), Normalizer())\nX_train = mm.fit_transform(X_train)\nX_test = mm.transform(X_test)\n\n\n# %%\n# As a baseline, train a linear SVM on the original features and print the\n# accuracy. We also measure and store accuracies and training times to\n# plot them latter.\n\nresults = {}\n\nlsvm = LinearSVC()\nstart = time.time()\nlsvm.fit(X_train, y_train)\nlsvm_time = time.time() - start\nlsvm_score = 100 * lsvm.score(X_test, y_test)\n\nresults[\"LSVM\"] = {\"time\": lsvm_time, \"score\": lsvm_score}\nprint(f\"Linear SVM score on raw features: {lsvm_score:.2f}%\")\n\n# %%\n# Then we train linear SVMs on the features generated by\n# :class:`PolynomialCountSketch` with different values for `n_components`,\n# showing that these kernel feature approximations improve the accuracy\n# of linear classification. 
In typical application scenarios, `n_components`\n# should be larger than the number of features in the input representation\n# in order to achieve an improvement with respect to linear classification.\n# As a rule of thumb, the optimum of evaluation score / run time cost is\n# typically achieved at around `n_components` = 10 * `n_features`, though this\n# might depend on the specific dataset being handled. Note that, since the\n# original samples have 54 features, the explicit feature map of the\n# polynomial kernel of degree four would have approximately 8.5 million\n# features (precisely, 54^4). Thanks to :class:`PolynomialCountSketch`, we can\n# condense most of the discriminative information of that feature space into a\n# much more compact representation. We repeat the experiment `n_runs` times to\n# compensate for the stochastic nature of :class:`PolynomialCountSketch`.\n\nn_runs = 3\nfor n_components in [250, 500, 1000, 2000]:\n\n    ps_lsvm_time = 0\n    ps_lsvm_score = 0\n    for _ in range(n_runs):\n\n        pipeline = Pipeline(steps=[(\"kernel_approximator\",\n                                    PolynomialCountSketch(\n                                        n_components=n_components,\n                                        degree=4)),\n                                   (\"linear_classifier\", LinearSVC())])\n\n        start = time.time()\n        pipeline.fit(X_train, y_train)\n        ps_lsvm_time += time.time() - start\n        ps_lsvm_score += 100 * pipeline.score(X_test, y_test)\n\n    ps_lsvm_time /= n_runs\n    ps_lsvm_score /= n_runs\n\n    results[f\"LSVM + PS({n_components})\"] = {\n        \"time\": ps_lsvm_time, \"score\": ps_lsvm_score\n    }\n    print(f\"Linear SVM score on {n_components} PolynomialCountSketch \" +\n          f\"features: {ps_lsvm_score:.2f}%\")\n\n# %%\n# Train a kernelized SVM to see how well :class:`PolynomialCountSketch`\n# is approximating the performance of the kernel. This, of course, may take\n# some time, as the SVC class scales relatively poorly. This is the\n# reason why kernel approximators are so useful:\n\nfrom sklearn.svm import SVC\n\nksvm = SVC(C=500., kernel=\"poly\", degree=4, coef0=0, gamma=1.)\n\nstart = time.time()\nksvm.fit(X_train, y_train)\nksvm_time = time.time() - start\nksvm_score = 100 * ksvm.score(X_test, y_test)\n\nresults[\"KSVM\"] = {\"time\": ksvm_time, \"score\": ksvm_score}\nprint(f\"Kernel-SVM score on raw features: {ksvm_score:.2f}%\")\n\n# %%\n# Finally, plot the results of the different methods against their training\n# times. 
As we can see, the kernelized SVM achieves a higher accuracy,\n# but its training time is much larger and, most importantly, will grow\n# much faster if the number of training samples increases.\n\nN_COMPONENTS = [250, 500, 1000, 2000]\n\nfig, ax = plt.subplots(figsize=(7, 7))\nax.scatter([results[\"LSVM\"][\"time\"], ], [results[\"LSVM\"][\"score\"], ],\n           label=\"Linear SVM\", c=\"green\", marker=\"^\")\n\nax.scatter([results[\"LSVM + PS(250)\"][\"time\"], ],\n           [results[\"LSVM + PS(250)\"][\"score\"], ],\n           label=\"Linear SVM + PolynomialCountSketch\", c=\"blue\")\nfor n_components in N_COMPONENTS:\n    ax.scatter([results[f\"LSVM + PS({n_components})\"][\"time\"], ],\n               [results[f\"LSVM + PS({n_components})\"][\"score\"], ],\n               c=\"blue\")\n    ax.annotate(f\"n_comp.={n_components}\",\n                (results[f\"LSVM + PS({n_components})\"][\"time\"],\n                 results[f\"LSVM + PS({n_components})\"][\"score\"]),\n                xytext=(-30, 10), textcoords=\"offset pixels\")\n\nax.scatter([results[\"KSVM\"][\"time\"], ], [results[\"KSVM\"][\"score\"], ],\n           label=\"Kernel SVM\", c=\"red\", marker=\"x\")\n\nax.set_xlabel(\"Training time (s)\")\nax.set_ylabel(\"Accuracy (%)\")\nax.legend()\n#plt.show()\n\n# %%\n# References\n# ==========\n#\n# [1] Pham, Ninh and Rasmus Pagh. \"Fast and scalable polynomial kernels via\n# explicit feature maps.\" KDD '13 (2013).\n# https://doi.org/10.1145/2487575.2487591\n#\n# [2] LIBSVM binary datasets repository\n# https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary.html\n",
"\"\"\"Benchmarks of Lasso regularization path computation using Lars and CD\n\nThe input data is mostly low rank but is a fat infinite tail.\n\"\"\"\nfrom collections import defaultdict\nimport gc\nimport sys\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.linear_model import lars_path, lars_path_gram\nfrom sklearn.linear_model import lasso_path\nfrom sklearn.datasets import make_regression\n\n\ndef compute_bench(samples_range, features_range):\n\n it = 0\n\n results = defaultdict(lambda: [])\n\n max_it = len(samples_range) * len(features_range)\n for n_samples in samples_range:\n for n_features in features_range:\n it += 1\n print('====================')\n print('Iteration %03d of %03d' % (it, max_it))\n print('====================')\n dataset_kwargs = {\n 'n_samples': n_samples,\n 'n_features': n_features,\n 'n_informative': n_features // 10,\n 'effective_rank': min(n_samples, n_features) / 10,\n #'effective_rank': None,\n 'bias': 0.0,\n }\n print(\"n_samples: %d\" % n_samples)\n print(\"n_features: %d\" % n_features)\n X, y = make_regression(**dataset_kwargs)\n\n gc.collect()\n print(\"benchmarking lars_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n G = np.dot(X.T, X) # precomputed Gram matrix\n Xy = np.dot(X.T, y)\n lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lars_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lars_path(X, y, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (without Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=True)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=False)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (without Gram)'].append(delta)\n\n return results\n\n\nif __name__ == '__main__':\n from mpl_toolkits.mplot3d import axes3d # register the 3d projection\n import matplotlib.pyplot as plt\n\n samples_range = np.linspace(10, 500, 3).astype(int) \n features_range = np.linspace(10, 1340 , 3).astype(int) \n results = compute_bench(samples_range, features_range)\n\n max_time = max(max(t) for t in results.values())\n\n fig = plt.figure('scikit-learn Lasso path benchmark results')\n i = 1\n for c, (label, timings) in zip('bcry', sorted(results.items())):\n ax = fig.add_subplot(2, 2, i, projection='3d')\n X, Y = np.meshgrid(samples_range, features_range)\n Z = np.asarray(timings).reshape(samples_range.shape[0],\n features_range.shape[0])\n\n # plot the actual surface\n ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)\n\n # dummy point plot to stick the legend to since surface plot do not\n # support legends (yet?)\n # ax.plot([1], [1], [1], color=c, label=label)\n\n ax.set_xlabel('n_samples')\n ax.set_ylabel('n_features')\n ax.set_zlabel('Time (s)')\n ax.set_zlim3d(0.0, max_time * 1.1)\n ax.set_title(label)\n # ax.legend()\n i += 1\n #plt.show()\n",
"\"\"\"\n==================================================\nExplicit feature map approximation for RBF kernels\n==================================================\n\nAn example illustrating the approximation of the feature map\nof an RBF kernel.\n\n.. currentmodule:: sklearn.kernel_approximation\n\nIt shows how to use :class:`RBFSampler` and :class:`Nystroem` to\napproximate the feature map of an RBF kernel for classification with an SVM on\nthe digits dataset. Results using a linear SVM in the original space, a linear\nSVM using the approximate mappings and using a kernelized SVM are compared.\nTimings and accuracy for varying amounts of Monte Carlo samplings (in the case\nof :class:`RBFSampler`, which uses random Fourier features) and different sized\nsubsets of the training set (for :class:`Nystroem`) for the approximate mapping\nare shown.\n\nPlease note that the dataset here is not large enough to show the benefits\nof kernel approximation, as the exact SVM is still reasonably fast.\n\nSampling more dimensions clearly leads to better classification results, but\ncomes at a greater cost. This means there is a tradeoff between runtime and\naccuracy, given by the parameter n_components. Note that solving the Linear\nSVM and also the approximate kernel SVM could be greatly accelerated by using\nstochastic gradient descent via :class:`~sklearn.linear_model.SGDClassifier`.\nThis is not easily possible for the case of the kernelized SVM.\n\n\"\"\"\n\n# %%\n# Python package and dataset imports, load dataset\n# ---------------------------------------------------\n\n\n# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>\n# Andreas Mueller <[email protected]>\n# License: BSD 3 clause\n\nprint(__doc__)\n\n# Standard scientific Python imports\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom time import time\n\n# Import datasets, classifiers and performance metrics\nfrom sklearn import datasets, svm, pipeline\nfrom sklearn.kernel_approximation import (RBFSampler,\n Nystroem)\nfrom sklearn.decomposition import PCA\n\n# The digits dataset\ndigits = datasets.load_digits(n_class=9)\n\n\n# %%\n# Timing and accuracy plots\n# --------------------------------------------------\n# To apply an classifier on this data, we need to flatten the image, to\n# turn the data in a (samples, feature) matrix:\nn_samples = len(digits.data)\ndata = digits.data / 16.\ndata -= data.mean(axis=0)\n\n# We learn the digits on the first half of the digits\ndata_train, targets_train = (data[:n_samples // 2],\n digits.target[:n_samples // 2])\n\n\n# Now predict the value of the digit on the second half:\ndata_test, targets_test = (data[n_samples // 2:],\n digits.target[n_samples // 2:])\n# data_test = scaler.transform(data_test)\n\n# Create a classifier: a support vector classifier\nkernel_svm = svm.SVC(gamma=.2)\nlinear_svm = svm.LinearSVC()\n\n# create pipeline from kernel approximation\n# and linear svm\nfeature_map_fourier = RBFSampler(gamma=.2, random_state=1)\nfeature_map_nystroem = Nystroem(gamma=.2, random_state=1)\nfourier_approx_svm = pipeline.Pipeline([(\"feature_map\", feature_map_fourier),\n (\"svm\", svm.LinearSVC())])\n\nnystroem_approx_svm = pipeline.Pipeline([(\"feature_map\", feature_map_nystroem),\n (\"svm\", svm.LinearSVC())])\n\n# fit and predict using linear and kernel svm:\n\nkernel_svm_time = time()\nkernel_svm.fit(data_train, targets_train)\nkernel_svm_score = kernel_svm.score(data_test, targets_test)\nkernel_svm_time = time() - kernel_svm_time\n\nlinear_svm_time = 
time()\nlinear_svm.fit(data_train, targets_train)\nlinear_svm_score = linear_svm.score(data_test, targets_test)\nlinear_svm_time = time() - linear_svm_time\n\nsample_sizes = 30 * np.arange(1, 10)\nfourier_scores = []\nnystroem_scores = []\nfourier_times = []\nnystroem_times = []\n\nfor D in sample_sizes:\n    fourier_approx_svm.set_params(feature_map__n_components=D)\n    nystroem_approx_svm.set_params(feature_map__n_components=D)\n    start = time()\n    nystroem_approx_svm.fit(data_train, targets_train)\n    nystroem_times.append(time() - start)\n\n    start = time()\n    fourier_approx_svm.fit(data_train, targets_train)\n    fourier_times.append(time() - start)\n\n    fourier_score = fourier_approx_svm.score(data_test, targets_test)\n    nystroem_score = nystroem_approx_svm.score(data_test, targets_test)\n    nystroem_scores.append(nystroem_score)\n    fourier_scores.append(fourier_score)\n\n# plot the results:\nplt.figure(figsize=(16, 4))\naccuracy = plt.subplot(121)\n# second y axis for timings\ntimescale = plt.subplot(122)\n\naccuracy.plot(sample_sizes, nystroem_scores, label=\"Nystroem approx. kernel\")\ntimescale.plot(sample_sizes, nystroem_times, '--',\n               label='Nystroem approx. kernel')\n\naccuracy.plot(sample_sizes, fourier_scores, label=\"Fourier approx. kernel\")\ntimescale.plot(sample_sizes, fourier_times, '--',\n               label='Fourier approx. kernel')\n\n# horizontal lines for exact rbf and linear kernels:\naccuracy.plot([sample_sizes[0], sample_sizes[-1]],\n              [linear_svm_score, linear_svm_score], label=\"linear svm\")\ntimescale.plot([sample_sizes[0], sample_sizes[-1]],\n               [linear_svm_time, linear_svm_time], '--', label='linear svm')\n\naccuracy.plot([sample_sizes[0], sample_sizes[-1]],\n              [kernel_svm_score, kernel_svm_score], label=\"rbf svm\")\ntimescale.plot([sample_sizes[0], sample_sizes[-1]],\n               [kernel_svm_time, kernel_svm_time], '--', label='rbf svm')\n\n# vertical line for dataset dimensionality = 64\naccuracy.plot([64, 64], [0.7, 1], label=\"n_features\")\n\n# legends and labels\naccuracy.set_title(\"Classification accuracy\")\ntimescale.set_title(\"Training times\")\naccuracy.set_xlim(sample_sizes[0], sample_sizes[-1])\naccuracy.set_xticks(())\naccuracy.set_ylim(np.min(fourier_scores), 1)\ntimescale.set_xlabel(\"Sampling steps = transformed feature dimension\")\naccuracy.set_ylabel(\"Classification accuracy\")\ntimescale.set_ylabel(\"Training time in seconds\")\naccuracy.legend(loc='best')\ntimescale.legend(loc='best')\nplt.tight_layout()\n#plt.show()\n\n\n# %%\n# Decision Surfaces of RBF Kernel SVM and Linear SVM\n# --------------------------------------------------------\n# The second plot visualizes the decision surfaces of the RBF kernel SVM and\n# the linear SVM with approximate kernel maps.\n# The plot shows decision surfaces of the classifiers projected onto\n# the first two principal components of the data. This visualization should\n# be taken with a grain of salt since it is just an interesting slice through\n# the decision surface in 64 dimensions. 
In particular, note that\n# a datapoint (represented as a dot) is not necessarily classified\n# into the region in which it lies, since it does not lie on the plane\n# spanned by the first two principal components.\n# The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail\n# in :ref:`kernel_approximation`.\n\n# visualize the decision surface, projected down to the first\n# two principal components of the dataset\npca = PCA(n_components=8).fit(data_train)\n\nX = pca.transform(data_train)\n\n# Generate grid along first two principal components\nmultiples = np.arange(-2, 2, 0.1)\n# steps along first component\nfirst = multiples[:, np.newaxis] * pca.components_[0, :]\n# steps along second component\nsecond = multiples[:, np.newaxis] * pca.components_[1, :]\n# combine\ngrid = first[np.newaxis, :, :] + second[:, np.newaxis, :]\nflat_grid = grid.reshape(-1, data.shape[1])\n\n# title for the plots\ntitles = ['SVC with rbf kernel',\n          'SVC (linear kernel)\\n with Fourier rbf feature map\\n'\n          'n_components=100',\n          'SVC (linear kernel)\\n with Nystroem rbf feature map\\n'\n          'n_components=100']\n\nplt.figure(figsize=(18, 7.5))\nplt.rcParams.update({'font.size': 14})\n# predict and plot (classifier order matches the titles above)\nfor i, clf in enumerate((kernel_svm, fourier_approx_svm,\n                         nystroem_approx_svm)):\n    # Plot the decision boundary. For that, we will assign a color to each\n    # point in the mesh [x_min, x_max]x[y_min, y_max].\n    plt.subplot(1, 3, i + 1)\n    Z = clf.predict(flat_grid)\n\n    # Put the result into a color plot\n    Z = Z.reshape(grid.shape[:-1])\n    plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)\n    plt.axis('off')\n\n    # Plot also the training points\n    plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired,\n                edgecolors=(0, 0, 0))\n\n    plt.title(titles[i])\nplt.tight_layout()\n#plt.show()\n",
"\"\"\"\nTo run this, you'll need to have installed.\n\n * scikit-learn\n\nDoes two benchmarks\n\nFirst, we fix a training set, increase the number of\nsamples to classify and plot number of classified samples as a\nfunction of time.\n\nIn the second benchmark, we increase the number of dimensions of the\ntraining set, classify a sample and plot the time taken as a function\nof the number of dimensions.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gc\nfrom datetime import datetime\n\n# to store the results\nscikit_classifier_results = []\nscikit_regressor_results = []\n\nmu_second = 0.0 + 10 ** 6 # number of microseconds in a second\n\n\ndef bench_scikit_tree_classifier(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree classifier\"\"\"\n\n from sklearn.tree import DecisionTreeClassifier\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeClassifier()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_classifier_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\ndef bench_scikit_tree_regressor(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree regressor\"\"\"\n\n from sklearn.tree import DecisionTreeRegressor\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeRegressor()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_regressor_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\nif __name__ == '__main__':\n\n print('============================================')\n print('Warning: this is going to take a looong time')\n print('============================================')\n\n n = 3\n step = 10000\n n_samples = 10000\n dim = 10\n n_classes = 10\n for i in range(n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n n_samples += step\n X = np.random.randn(n_samples, dim)\n Y = np.random.randint(0, n_classes, (n_samples,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(n_samples)\n bench_scikit_tree_regressor(X, Y)\n\n xx = range(0, n * step, step)\n plt.figure('scikit-learn tree benchmark results')\n plt.subplot(211)\n plt.title('Learning with varying number of samples')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of samples')\n plt.ylabel('Time (s)')\n\n scikit_classifier_results = []\n scikit_regressor_results = []\n n = 4\n step = 500\n start_dim = 500\n n_classes = 10\n\n dim = start_dim\n for i in range(0, n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n dim += step\n X = np.random.randn(100, dim)\n Y = np.random.randint(0, n_classes, (100,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(100)\n bench_scikit_tree_regressor(X, Y)\n\n xx = np.arange(start_dim, start_dim + n * step, step)\n plt.subplot(212)\n plt.title('Learning in high dimensional spaces')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of dimensions')\n plt.ylabel('Time (s)')\n plt.axis('tight')\n #plt.show()\n",
"\"\"\"Benchmarks of Lasso regularization path computation using Lars and CD\n\nThe input data is mostly low rank but is a fat infinite tail.\n\"\"\"\nfrom collections import defaultdict\nimport gc\nimport sys\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.linear_model import lars_path, lars_path_gram\nfrom sklearn.linear_model import lasso_path\nfrom sklearn.datasets import make_regression\n\n\ndef compute_bench(samples_range, features_range):\n\n it = 0\n\n results = defaultdict(lambda: [])\n\n max_it = len(samples_range) * len(features_range)\n for n_samples in samples_range:\n for n_features in features_range:\n it += 1\n print('====================')\n print('Iteration %03d of %03d' % (it, max_it))\n print('====================')\n dataset_kwargs = {\n 'n_samples': n_samples,\n 'n_features': n_features,\n 'n_informative': n_features // 10,\n 'effective_rank': min(n_samples, n_features) / 10,\n #'effective_rank': None,\n 'bias': 0.0,\n }\n print(\"n_samples: %d\" % n_samples)\n print(\"n_features: %d\" % n_features)\n X, y = make_regression(**dataset_kwargs)\n\n gc.collect()\n print(\"benchmarking lars_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n G = np.dot(X.T, X) # precomputed Gram matrix\n Xy = np.dot(X.T, y)\n lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lars_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lars_path(X, y, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (without Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=True)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=False)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (without Gram)'].append(delta)\n\n return results\n\n\nif __name__ == '__main__':\n from mpl_toolkits.mplot3d import axes3d # register the 3d projection\n import matplotlib.pyplot as plt\n\n \n samples_range = np.linspace(10, 2000, 5).astype(int)\n \n features_range = np.linspace(10, 2000, 5).astype(int)\n results = compute_bench(samples_range, features_range)\n\n max_time = max(max(t) for t in results.values())\n\n fig = plt.figure('scikit-learn Lasso path benchmark results')\n i = 1\n for c, (label, timings) in zip('bcry', sorted(results.items())):\n ax = fig.add_subplot(2, 2, i, projection='3d')\n X, Y = np.meshgrid(samples_range, features_range)\n Z = np.asarray(timings).reshape(samples_range.shape[0],\n features_range.shape[0])\n\n # plot the actual surface\n ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)\n\n # dummy point plot to stick the legend to since surface plot do not\n # support legends (yet?)\n # ax.plot([1], [1], [1], color=c, label=label)\n\n ax.set_xlabel('n_samples')\n ax.set_ylabel('n_features')\n ax.set_zlabel('Time (s)')\n ax.set_zlim3d(0.0, max_time * 1.1)\n ax.set_title(label)\n # ax.legend()\n i += 1\n #plt.show()\n",
"\"\"\"Benchmarks of Lasso regularization path computation using Lars and CD\n\nThe input data is mostly low rank but is a fat infinite tail.\n\"\"\"\nfrom collections import defaultdict\nimport gc\nimport sys\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.linear_model import lars_path, lars_path_gram\nfrom sklearn.linear_model import lasso_path\nfrom sklearn.datasets import make_regression\n\n\ndef compute_bench(samples_range, features_range):\n\n it = 0\n\n results = defaultdict(lambda: [])\n\n max_it = len(samples_range) * len(features_range)\n for n_samples in samples_range:\n for n_features in features_range:\n it += 1\n print('====================')\n print('Iteration %03d of %03d' % (it, max_it))\n print('====================')\n dataset_kwargs = {\n 'n_samples': n_samples,\n 'n_features': n_features,\n 'n_informative': n_features // 10,\n 'effective_rank': min(n_samples, n_features) / 10,\n #'effective_rank': None,\n 'bias': 0.0,\n }\n print(\"n_samples: %d\" % n_samples)\n print(\"n_features: %d\" % n_features)\n X, y = make_regression(**dataset_kwargs)\n\n gc.collect()\n print(\"benchmarking lars_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n G = np.dot(X.T, X) # precomputed Gram matrix\n Xy = np.dot(X.T, y)\n lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lars_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lars_path(X, y, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (without Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=True)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=False)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (without Gram)'].append(delta)\n\n return results\n\n\nif __name__ == '__main__':\n from mpl_toolkits.mplot3d import axes3d # register the 3d projection\n import matplotlib.pyplot as plt\n\n samples_range = np.linspace(10, 1730, 3).astype(int) \n features_range = np.linspace(10, 500 , 3).astype(int) \n results = compute_bench(samples_range, features_range)\n\n max_time = max(max(t) for t in results.values())\n\n fig = plt.figure('scikit-learn Lasso path benchmark results')\n i = 1\n for c, (label, timings) in zip('bcry', sorted(results.items())):\n ax = fig.add_subplot(2, 2, i, projection='3d')\n X, Y = np.meshgrid(samples_range, features_range)\n Z = np.asarray(timings).reshape(samples_range.shape[0],\n features_range.shape[0])\n\n # plot the actual surface\n ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)\n\n # dummy point plot to stick the legend to since surface plot do not\n # support legends (yet?)\n # ax.plot([1], [1], [1], color=c, label=label)\n\n ax.set_xlabel('n_samples')\n ax.set_ylabel('n_features')\n ax.set_zlabel('Time (s)')\n ax.set_zlim3d(0.0, max_time * 1.1)\n ax.set_title(label)\n # ax.legend()\n i += 1\n #plt.show()\n",
"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n=========================================================\nLogistic Regression 3-class Classifier\n=========================================================\n\nShow below is a logistic-regression classifiers decision boundaries on the\nfirst two dimensions (sepal length and width) of the `iris\n<https://en.wikipedia.org/wiki/Iris_flower_data_set>`_ dataset. The datapoints\nare colored according to their labels.\n\n\"\"\"\nprint(__doc__)\n\n# Code source: Gaël Varoquaux\n# Modified for documentation by Jaques Grobler\n# License: BSD 3 clause\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn import datasets\n\n# import some data to play with\niris = datasets.load_iris()\nX = iris.data[:, :2] # we only take the first two features.\nY = iris.target\n\n# Create an instance of Logistic Regression Classifier and fit the data.\nlogreg = LogisticRegression(C=1e5)\nlogreg.fit(X, Y)\n\n# Plot the decision boundary. For that, we will assign a color to each\n# point in the mesh [x_min, x_max]x[y_min, y_max].\nx_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\ny_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\nh = .02 # step size in the mesh\nxx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\nZ = logreg.predict(np.c_[xx.ravel(), yy.ravel()])\n\n# Put the result into a color plot\nZ = Z.reshape(xx.shape)\nplt.figure(1, figsize=(4, 3))\nplt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)\n\n# Plot also the training points\nplt.scatter(X[:, 0], X[:, 1], c=Y, edgecolors='k', cmap=plt.cm.Paired)\nplt.xlabel('Sepal length')\nplt.ylabel('Sepal width')\n\nplt.xlim(xx.min(), xx.max())\nplt.ylim(yy.min(), yy.max())\nplt.xticks(())\nplt.yticks(())\n\n#plt.show()\n",
"\"\"\"\nTo run this, you'll need to have installed.\n\n * scikit-learn\n\nDoes two benchmarks\n\nFirst, we fix a training set, increase the number of\nsamples to classify and plot number of classified samples as a\nfunction of time.\n\nIn the second benchmark, we increase the number of dimensions of the\ntraining set, classify a sample and plot the time taken as a function\nof the number of dimensions.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gc\nfrom datetime import datetime\n\n# to store the results\nscikit_classifier_results = []\nscikit_regressor_results = []\n\nmu_second = 0.0 + 10 ** 6 # number of microseconds in a second\n\n\ndef bench_scikit_tree_classifier(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree classifier\"\"\"\n\n from sklearn.tree import DecisionTreeClassifier\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeClassifier()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_classifier_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\ndef bench_scikit_tree_regressor(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree regressor\"\"\"\n\n from sklearn.tree import DecisionTreeRegressor\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeRegressor()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_regressor_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\nif __name__ == '__main__':\n\n print('============================================')\n print('Warning: this is going to take a looong time')\n print('============================================')\n\n n = 6\n step = 10000\n n_samples = 10000\n dim = 10\n n_classes = 10\n for i in range(n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n n_samples += step\n X = np.random.randn(n_samples, dim)\n Y = np.random.randint(0, n_classes, (n_samples,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(n_samples)\n bench_scikit_tree_regressor(X, Y)\n\n xx = range(0, n * step, step)\n plt.figure('scikit-learn tree benchmark results')\n plt.subplot(211)\n plt.title('Learning with varying number of samples')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of samples')\n plt.ylabel('Time (s)')\n\n scikit_classifier_results = []\n scikit_regressor_results = []\n n = 5\n step = 500\n start_dim = 500\n n_classes = 10\n\n dim = start_dim\n for i in range(0, n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n dim += step\n X = np.random.randn(100, dim)\n Y = np.random.randint(0, n_classes, (100,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(100)\n bench_scikit_tree_regressor(X, Y)\n\n xx = np.arange(start_dim, start_dim + n * step, step)\n plt.subplot(212)\n plt.title('Learning in high dimensional spaces')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of dimensions')\n plt.ylabel('Time (s)')\n plt.axis('tight')\n #plt.show()\n",
"\"\"\"\nTo run this, you'll need to have installed.\n\n * scikit-learn\n\nDoes two benchmarks\n\nFirst, we fix a training set, increase the number of\nsamples to classify and plot number of classified samples as a\nfunction of time.\n\nIn the second benchmark, we increase the number of dimensions of the\ntraining set, classify a sample and plot the time taken as a function\nof the number of dimensions.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gc\nfrom datetime import datetime\n\n# to store the results\nscikit_classifier_results = []\nscikit_regressor_results = []\n\nmu_second = 0.0 + 10 ** 6 # number of microseconds in a second\n\n\ndef bench_scikit_tree_classifier(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree classifier\"\"\"\n\n from sklearn.tree import DecisionTreeClassifier\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeClassifier()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_classifier_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\ndef bench_scikit_tree_regressor(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree regressor\"\"\"\n\n from sklearn.tree import DecisionTreeRegressor\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeRegressor()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_regressor_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\nif __name__ == '__main__':\n\n print('============================================')\n print('Warning: this is going to take a looong time')\n print('============================================')\n\n n = 3\n step = 10000\n n_samples = 10000\n dim = 10\n n_classes = 10\n for i in range(n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n n_samples += step\n X = np.random.randn(n_samples, dim)\n Y = np.random.randint(0, n_classes, (n_samples,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(n_samples)\n bench_scikit_tree_regressor(X, Y)\n\n xx = range(0, n * step, step)\n plt.figure('scikit-learn tree benchmark results')\n plt.subplot(211)\n plt.title('Learning with varying number of samples')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of samples')\n plt.ylabel('Time (s)')\n\n scikit_classifier_results = []\n scikit_regressor_results = []\n n = 8\n step = 500\n start_dim = 500\n n_classes = 10\n\n dim = start_dim\n for i in range(0, n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n dim += step\n X = np.random.randn(100, dim)\n Y = np.random.randint(0, n_classes, (100,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(100)\n bench_scikit_tree_regressor(X, Y)\n\n xx = np.arange(start_dim, start_dim + n * step, step)\n plt.subplot(212)\n plt.title('Learning in high dimensional spaces')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of dimensions')\n plt.ylabel('Time (s)')\n plt.axis('tight')\n #plt.show()\n",
"\"\"\"Benchmarks of Lasso regularization path computation using Lars and CD\n\nThe input data is mostly low rank but is a fat infinite tail.\n\"\"\"\nfrom collections import defaultdict\nimport gc\nimport sys\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.linear_model import lars_path, lars_path_gram\nfrom sklearn.linear_model import lasso_path\nfrom sklearn.datasets import make_regression\n\n\ndef compute_bench(samples_range, features_range):\n\n it = 0\n\n results = defaultdict(lambda: [])\n\n max_it = len(samples_range) * len(features_range)\n for n_samples in samples_range:\n for n_features in features_range:\n it += 1\n print('====================')\n print('Iteration %03d of %03d' % (it, max_it))\n print('====================')\n dataset_kwargs = {\n 'n_samples': n_samples,\n 'n_features': n_features,\n 'n_informative': n_features // 10,\n 'effective_rank': min(n_samples, n_features) / 10,\n #'effective_rank': None,\n 'bias': 0.0,\n }\n print(\"n_samples: %d\" % n_samples)\n print(\"n_features: %d\" % n_features)\n X, y = make_regression(**dataset_kwargs)\n\n gc.collect()\n print(\"benchmarking lars_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n G = np.dot(X.T, X) # precomputed Gram matrix\n Xy = np.dot(X.T, y)\n lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lars_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lars_path(X, y, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (without Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=True)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=False)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (without Gram)'].append(delta)\n\n return results\n\n\nif __name__ == '__main__':\n from mpl_toolkits.mplot3d import axes3d # register the 3d projection\n import matplotlib.pyplot as plt\n\n samples_range = np.linspace(10, 1220, 3).astype(int) \n features_range = np.linspace(10, 500 , 3).astype(int) \n results = compute_bench(samples_range, features_range)\n\n max_time = max(max(t) for t in results.values())\n\n fig = plt.figure('scikit-learn Lasso path benchmark results')\n i = 1\n for c, (label, timings) in zip('bcry', sorted(results.items())):\n ax = fig.add_subplot(2, 2, i, projection='3d')\n X, Y = np.meshgrid(samples_range, features_range)\n Z = np.asarray(timings).reshape(samples_range.shape[0],\n features_range.shape[0])\n\n # plot the actual surface\n ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)\n\n # dummy point plot to stick the legend to since surface plot do not\n # support legends (yet?)\n # ax.plot([1], [1], [1], color=c, label=label)\n\n ax.set_xlabel('n_samples')\n ax.set_ylabel('n_features')\n ax.set_zlabel('Time (s)')\n ax.set_zlim3d(0.0, max_time * 1.1)\n ax.set_title(label)\n # ax.legend()\n i += 1\n #plt.show()\n",
"\"\"\"\nTo run this, you'll need to have installed.\n\n * scikit-learn\n\nDoes two benchmarks\n\nFirst, we fix a training set, increase the number of\nsamples to classify and plot number of classified samples as a\nfunction of time.\n\nIn the second benchmark, we increase the number of dimensions of the\ntraining set, classify a sample and plot the time taken as a function\nof the number of dimensions.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gc\nfrom datetime import datetime\n\n# to store the results\nscikit_classifier_results = []\nscikit_regressor_results = []\n\nmu_second = 0.0 + 10 ** 6 # number of microseconds in a second\n\n\ndef bench_scikit_tree_classifier(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree classifier\"\"\"\n\n from sklearn.tree import DecisionTreeClassifier\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeClassifier()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_classifier_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\ndef bench_scikit_tree_regressor(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree regressor\"\"\"\n\n from sklearn.tree import DecisionTreeRegressor\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeRegressor()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_regressor_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\nif __name__ == '__main__':\n\n print('============================================')\n print('Warning: this is going to take a looong time')\n print('============================================')\n\n n = 4\n step = 10000\n n_samples = 10000\n dim = 10\n n_classes = 10\n for i in range(n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n n_samples += step\n X = np.random.randn(n_samples, dim)\n Y = np.random.randint(0, n_classes, (n_samples,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(n_samples)\n bench_scikit_tree_regressor(X, Y)\n\n xx = range(0, n * step, step)\n plt.figure('scikit-learn tree benchmark results')\n plt.subplot(211)\n plt.title('Learning with varying number of samples')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of samples')\n plt.ylabel('Time (s)')\n\n scikit_classifier_results = []\n scikit_regressor_results = []\n n = 8\n step = 500\n start_dim = 500\n n_classes = 10\n\n dim = start_dim\n for i in range(0, n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n dim += step\n X = np.random.randn(100, dim)\n Y = np.random.randint(0, n_classes, (100,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(100)\n bench_scikit_tree_regressor(X, Y)\n\n xx = np.arange(start_dim, start_dim + n * step, step)\n plt.subplot(212)\n plt.title('Learning in high dimensional spaces')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of dimensions')\n plt.ylabel('Time (s)')\n plt.axis('tight')\n #plt.show()\n",
"\"\"\"\n=============================================\nEffect of varying threshold for self-training\n=============================================\n\nThis example illustrates the effect of a varying threshold on self-training.\nThe `breast_cancer` dataset is loaded, and labels are deleted such that only 50\nout of 569 samples have labels. A `SelfTrainingClassifier` is fitted on this\ndataset, with varying thresholds.\n\nThe upper graph shows the amount of labeled samples that the classifier has\navailable by the end of fit, and the accuracy of the classifier. The lower\ngraph shows the last iteration in which a sample was labeled. All values are\ncross validated with 3 folds.\n\nAt low thresholds (in [0.4, 0.5]), the classifier learns from samples that were\nlabeled with a low confidence. These low-confidence samples are likely have\nincorrect predicted labels, and as a result, fitting on these incorrect labels\nproduces a poor accuracy. Note that the classifier labels almost all of the\nsamples, and only takes one iteration.\n\nFor very high thresholds (in [0.9, 1)) we observe that the classifier does not\naugment its dataset (the amount of self-labeled samples is 0). As a result, the\naccuracy achieved with a threshold of 0.9999 is the same as a normal supervised\nclassifier would achieve.\n\nThe optimal accuracy lies in between both of these extremes at a threshold of\naround 0.7.\n\"\"\"\nprint(__doc__)\n\n# Authors: Oliver Rausch <[email protected]>\n# License: BSD\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import datasets\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import StratifiedKFold\nfrom sklearn.semi_supervised import SelfTrainingClassifier\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.utils import shuffle\n\nn_splits = 3\n\nX, y = datasets.load_breast_cancer(return_X_y=True)\nX, y = shuffle(X, y, random_state=42)\ny_true = y.copy()\ny[50:] = -1\ntotal_samples = y.shape[0]\n\nbase_classifier = SVC(probability=True, gamma=0.001, random_state=42)\n\nx_values = np.arange(0.4, 1.05, 0.05)\nx_values = np.append(x_values, 0.99999)\nscores = np.empty((x_values.shape[0], n_splits))\namount_labeled = np.empty((x_values.shape[0], n_splits))\namount_iterations = np.empty((x_values.shape[0], n_splits))\n\nfor (i, threshold) in enumerate(x_values):\n self_training_clf = SelfTrainingClassifier(base_classifier,\n threshold=threshold)\n\n # We need manual cross validation so that we don't treat -1 as a separate\n # class when computing accuracy\n skfolds = StratifiedKFold(n_splits=n_splits)\n for fold, (train_index, test_index) in enumerate(skfolds.split(X, y)):\n X_train = X[train_index]\n y_train = y[train_index]\n X_test = X[test_index]\n y_test = y[test_index]\n y_test_true = y_true[test_index]\n\n self_training_clf.fit(X_train, y_train)\n\n # The amount of labeled samples that at the end of fitting\n amount_labeled[i, fold] = total_samples - np.unique(\n self_training_clf.labeled_iter_, return_counts=True)[1][0]\n # The last iteration the classifier labeled a sample in\n amount_iterations[i, fold] = np.max(self_training_clf.labeled_iter_)\n\n y_pred = self_training_clf.predict(X_test)\n scores[i, fold] = accuracy_score(y_test_true, y_pred)\n\n\nax1 = plt.subplot(211)\nax1.errorbar(x_values, scores.mean(axis=1),\n yerr=scores.std(axis=1),\n capsize=2, color='b')\nax1.set_ylabel('Accuracy', color='b')\nax1.tick_params('y', colors='b')\n\nax2 = ax1.twinx()\nax2.errorbar(x_values, amount_labeled.mean(axis=1),\n 
yerr=amount_labeled.std(axis=1),\n capsize=2, color='g')\nax2.set_ylim(bottom=0)\nax2.set_ylabel('Amount of labeled samples', color='g')\nax2.tick_params('y', colors='g')\n\nax3 = plt.subplot(212, sharex=ax1)\nax3.errorbar(x_values, amount_iterations.mean(axis=1),\n yerr=amount_iterations.std(axis=1),\n capsize=2, color='b')\nax3.set_ylim(bottom=0)\nax3.set_ylabel('Amount of iterations')\nax3.set_xlabel('Threshold')\n\n#plt.show()\n",
"\"\"\"\nTo run this, you'll need to have installed.\n\n * scikit-learn\n\nDoes two benchmarks\n\nFirst, we fix a training set, increase the number of\nsamples to classify and plot number of classified samples as a\nfunction of time.\n\nIn the second benchmark, we increase the number of dimensions of the\ntraining set, classify a sample and plot the time taken as a function\nof the number of dimensions.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gc\nfrom datetime import datetime\n\n# to store the results\nscikit_classifier_results = []\nscikit_regressor_results = []\n\nmu_second = 0.0 + 10 ** 6 # number of microseconds in a second\n\n\ndef bench_scikit_tree_classifier(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree classifier\"\"\"\n\n from sklearn.tree import DecisionTreeClassifier\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeClassifier()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_classifier_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\ndef bench_scikit_tree_regressor(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree regressor\"\"\"\n\n from sklearn.tree import DecisionTreeRegressor\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeRegressor()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_regressor_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\nif __name__ == '__main__':\n\n print('============================================')\n print('Warning: this is going to take a looong time')\n print('============================================')\n\n n = 2\n step = 10000\n n_samples = 10000\n dim = 10\n n_classes = 10\n for i in range(n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n n_samples += step\n X = np.random.randn(n_samples, dim)\n Y = np.random.randint(0, n_classes, (n_samples,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(n_samples)\n bench_scikit_tree_regressor(X, Y)\n\n xx = range(0, n * step, step)\n plt.figure('scikit-learn tree benchmark results')\n plt.subplot(211)\n plt.title('Learning with varying number of samples')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of samples')\n plt.ylabel('Time (s)')\n\n scikit_classifier_results = []\n scikit_regressor_results = []\n n = 9\n step = 500\n start_dim = 500\n n_classes = 10\n\n dim = start_dim\n for i in range(0, n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n dim += step\n X = np.random.randn(100, dim)\n Y = np.random.randint(0, n_classes, (100,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(100)\n bench_scikit_tree_regressor(X, Y)\n\n xx = np.arange(start_dim, start_dim + n * step, step)\n plt.subplot(212)\n plt.title('Learning in high dimensional spaces')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of dimensions')\n plt.ylabel('Time (s)')\n plt.axis('tight')\n #plt.show()\n",
"\"\"\"\n===========================================\nRobust linear model estimation using RANSAC\n===========================================\n\nIn this example we see how to robustly fit a linear model to faulty data using\nthe RANSAC algorithm.\n\n\"\"\"\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfrom sklearn import linear_model, datasets\n\n\nn_samples = 1000\nn_outliers = 50\n\n\nX, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,\n n_informative=1, noise=10,\n coef=True, random_state=0)\n\n# Add outlier data\nnp.random.seed(0)\nX[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))\ny[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)\n\n# Fit line using all data\nlr = linear_model.LinearRegression()\nlr.fit(X, y)\n\n# Robustly fit linear model with RANSAC algorithm\nransac = linear_model.RANSACRegressor()\nransac.fit(X, y)\ninlier_mask = ransac.inlier_mask_\noutlier_mask = np.logical_not(inlier_mask)\n\n# Predict data of estimated models\nline_X = np.arange(X.min(), X.max())[:, np.newaxis]\nline_y = lr.predict(line_X)\nline_y_ransac = ransac.predict(line_X)\n\n# Compare estimated coefficients\nprint(\"Estimated coefficients (true, linear regression, RANSAC):\")\nprint(coef, lr.coef_, ransac.estimator_.coef_)\n\nlw = 2\nplt.scatter(X[inlier_mask], y[inlier_mask], color='yellowgreen', marker='.',\n label='Inliers')\nplt.scatter(X[outlier_mask], y[outlier_mask], color='gold', marker='.',\n label='Outliers')\nplt.plot(line_X, line_y, color='navy', linewidth=lw, label='Linear regressor')\nplt.plot(line_X, line_y_ransac, color='cornflowerblue', linewidth=lw,\n label='RANSAC regressor')\nplt.legend(loc='lower right')\nplt.xlabel(\"Input\")\nplt.ylabel(\"Response\")\n#plt.show()\n",
"\"\"\"\nTo run this, you'll need to have installed.\n\n * scikit-learn\n\nDoes two benchmarks\n\nFirst, we fix a training set, increase the number of\nsamples to classify and plot number of classified samples as a\nfunction of time.\n\nIn the second benchmark, we increase the number of dimensions of the\ntraining set, classify a sample and plot the time taken as a function\nof the number of dimensions.\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport gc\nfrom datetime import datetime\n\n# to store the results\nscikit_classifier_results = []\nscikit_regressor_results = []\n\nmu_second = 0.0 + 10 ** 6 # number of microseconds in a second\n\n\ndef bench_scikit_tree_classifier(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree classifier\"\"\"\n\n from sklearn.tree import DecisionTreeClassifier\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeClassifier()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_classifier_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\ndef bench_scikit_tree_regressor(X, Y):\n \"\"\"Benchmark with scikit-learn decision tree regressor\"\"\"\n\n from sklearn.tree import DecisionTreeRegressor\n\n gc.collect()\n\n # start time\n tstart = datetime.now()\n clf = DecisionTreeRegressor()\n clf.fit(X, Y).predict(X)\n delta = (datetime.now() - tstart)\n # stop time\n\n scikit_regressor_results.append(\n delta.seconds + delta.microseconds / mu_second)\n\n\nif __name__ == '__main__':\n\n print('============================================')\n print('Warning: this is going to take a looong time')\n print('============================================')\n\n n = 1\n step = 10000\n n_samples = 10000\n dim = 10\n n_classes = 10\n for i in range(n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n n_samples += step\n X = np.random.randn(n_samples, dim)\n Y = np.random.randint(0, n_classes, (n_samples,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(n_samples)\n bench_scikit_tree_regressor(X, Y)\n\n xx = range(0, n * step, step)\n plt.figure('scikit-learn tree benchmark results')\n plt.subplot(211)\n plt.title('Learning with varying number of samples')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of samples')\n plt.ylabel('Time (s)')\n\n scikit_classifier_results = []\n scikit_regressor_results = []\n n = 10\n step = 500\n start_dim = 500\n n_classes = 10\n\n dim = start_dim\n for i in range(0, n):\n print('============================================')\n print('Entering iteration %s of %s' % (i, n))\n print('============================================')\n dim += step\n X = np.random.randn(100, dim)\n Y = np.random.randint(0, n_classes, (100,))\n bench_scikit_tree_classifier(X, Y)\n Y = np.random.randn(100)\n bench_scikit_tree_regressor(X, Y)\n\n xx = np.arange(start_dim, start_dim + n * step, step)\n plt.subplot(212)\n plt.title('Learning in high dimensional spaces')\n plt.plot(xx, scikit_classifier_results, 'g-', label='classification')\n plt.plot(xx, scikit_regressor_results, 'r-', label='regression')\n plt.legend(loc='upper left')\n plt.xlabel('number of dimensions')\n plt.ylabel('Time (s)')\n plt.axis('tight')\n #plt.show()\n",
"\"\"\"Benchmarks of Lasso regularization path computation using Lars and CD\n\nThe input data is mostly low rank but is a fat infinite tail.\n\"\"\"\nfrom collections import defaultdict\nimport gc\nimport sys\nfrom time import time\n\nimport numpy as np\n\nfrom sklearn.linear_model import lars_path, lars_path_gram\nfrom sklearn.linear_model import lasso_path\nfrom sklearn.datasets import make_regression\n\n\ndef compute_bench(samples_range, features_range):\n\n it = 0\n\n results = defaultdict(lambda: [])\n\n max_it = len(samples_range) * len(features_range)\n for n_samples in samples_range:\n for n_features in features_range:\n it += 1\n print('====================')\n print('Iteration %03d of %03d' % (it, max_it))\n print('====================')\n dataset_kwargs = {\n 'n_samples': n_samples,\n 'n_features': n_features,\n 'n_informative': n_features // 10,\n 'effective_rank': min(n_samples, n_features) / 10,\n #'effective_rank': None,\n 'bias': 0.0,\n }\n print(\"n_samples: %d\" % n_samples)\n print(\"n_features: %d\" % n_features)\n X, y = make_regression(**dataset_kwargs)\n\n gc.collect()\n print(\"benchmarking lars_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n G = np.dot(X.T, X) # precomputed Gram matrix\n Xy = np.dot(X.T, y)\n lars_path_gram(Xy=Xy, Gram=G, n_samples=y.size, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lars_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lars_path(X, y, method='lasso')\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lars_path (without Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (with Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=True)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (with Gram)'].append(delta)\n\n gc.collect()\n print(\"benchmarking lasso_path (without Gram):\", end='')\n sys.stdout.flush()\n tstart = time()\n lasso_path(X, y, precompute=False)\n delta = time() - tstart\n print(\"%0.3fs\" % delta)\n results['lasso_path (without Gram)'].append(delta)\n\n return results\n\n\nif __name__ == '__main__':\n from mpl_toolkits.mplot3d import axes3d # register the 3d projection\n import matplotlib.pyplot as plt\n\n samples_range = np.linspace(10, 500, 3).astype(int) \n features_range = np.linspace(10, 950 , 3).astype(int) \n results = compute_bench(samples_range, features_range)\n\n max_time = max(max(t) for t in results.values())\n\n fig = plt.figure('scikit-learn Lasso path benchmark results')\n i = 1\n for c, (label, timings) in zip('bcry', sorted(results.items())):\n ax = fig.add_subplot(2, 2, i, projection='3d')\n X, Y = np.meshgrid(samples_range, features_range)\n Z = np.asarray(timings).reshape(samples_range.shape[0],\n features_range.shape[0])\n\n # plot the actual surface\n ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)\n\n # dummy point plot to stick the legend to since surface plot do not\n # support legends (yet?)\n # ax.plot([1], [1], [1], color=c, label=label)\n\n ax.set_xlabel('n_samples')\n ax.set_ylabel('n_features')\n ax.set_zlabel('Time (s)')\n ax.set_zlim3d(0.0, max_time * 1.1)\n ax.set_title(label)\n # ax.legend()\n i += 1\n #plt.show()\n",
"\"\"\"\n============================================\nCurve Fitting with Bayesian Ridge Regression\n============================================\n\nComputes a Bayesian Ridge Regression of Sinusoids.\n\nSee :ref:`bayesian_ridge_regression` for more information on the regressor.\n\nIn general, when fitting a curve with a polynomial by Bayesian ridge\nregression, the selection of initial values of\nthe regularization parameters (alpha, lambda) may be important.\nThis is because the regularization parameters are determined by an iterative\nprocedure that depends on initial values.\n\nIn this example, the sinusoid is approximated by a polynomial using different\npairs of initial values.\n\nWhen starting from the default values (alpha_init = 1.90, lambda_init = 1.),\nthe bias of the resulting curve is large, and the variance is small.\nSo, lambda_init should be relatively small (1.e-3) so as to reduce the bias.\n\nAlso, by evaluating log marginal likelihood (L) of\nthese models, we can determine which one is better.\nIt can be concluded that the model with larger L is more likely.\n\"\"\"\nprint(__doc__)\n\n# Author: Yoshihiro Uchida <[email protected]>\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom sklearn.linear_model import BayesianRidge\n\n\ndef func(x): return np.sin(2*np.pi*x)\n\n\n# #############################################################################\n# Generate sinusoidal data with noise\nsize = 25\nrng = np.random.RandomState(1234)\nx_train = rng.uniform(0., 1., size)\ny_train = func(x_train) + rng.normal(scale=0.1, size=size)\nx_test = np.linspace(0., 1., 100)\n\n\n# #############################################################################\n# Fit by cubic polynomial\nn_order = 3\nX_train = np.vander(x_train, n_order + 1, increasing=True)\nX_test = np.vander(x_test, n_order + 1, increasing=True)\n\n# #############################################################################\n# Plot the true and predicted curves with log marginal likelihood (L)\nreg = BayesianRidge(tol=1e-6, fit_intercept=False, compute_score=True)\nfig, axes = plt.subplots(1, 2, figsize=(8, 4))\nfor i, ax in enumerate(axes):\n # Bayesian ridge regression with different initial value pairs\n if i == 0:\n init = [1 / np.var(y_train), 1.] # Default values\n elif i == 1:\n init = [1., 1e-3]\n reg.set_params(alpha_init=init[0], lambda_init=init[1])\n reg.fit(X_train, y_train)\n ymean, ystd = reg.predict(X_test, return_std=True)\n\n ax.plot(x_test, func(x_test), color=\"blue\", label=\"sin($2\\\\pi x$)\")\n ax.scatter(x_train, y_train, s=50, alpha=0.5, label=\"observation\")\n ax.plot(x_test, ymean, color=\"red\", label=\"predict mean\")\n ax.fill_between(x_test, ymean-ystd, ymean+ystd,\n color=\"pink\", alpha=0.5, label=\"predict std\")\n ax.set_ylim(-1.3, 1.3)\n ax.legend()\n title = \"$\\\\alpha$_init$={:.2f},\\\\ \\\\lambda$_init$={}$\".format(\n init[0], init[1])\n if i == 0:\n title += \" (Default)\"\n ax.set_title(title, fontsize=12)\n text = \"$\\\\alpha={:.1f}$\\n$\\\\lambda={:.3f}$\\n$L={:.1f}$\".format(\n reg.alpha_, reg.lambda_, reg.scores_[-1])\n ax.text(0.05, -1.0, text, fontsize=12)\n\nplt.tight_layout()\n#plt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"numpy.asarray",
"matplotlib.pyplot.plot",
"sklearn.linear_model.SGDClassifier",
"sklearn.linear_model.Perceptron",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.gcf",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"sklearn.linear_model.PassiveAggressiveClassifier",
"matplotlib.pyplot.title",
"sklearn.naive_bayes.MultinomialNB",
"sklearn.feature_extraction.text.HashingVectorizer",
"numpy.array",
"matplotlib.pyplot.ylabel",
"sklearn.datasets.get_data_home",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks"
],
[
"numpy.dot",
"sklearn.linear_model.lars_path_gram",
"numpy.linspace",
"numpy.asarray",
"sklearn.linear_model.lars_path",
"sklearn.linear_model.lasso_path",
"sklearn.datasets.make_regression",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
],
[
"sklearn.datasets.fetch_covtype",
"matplotlib.pyplot.subplots",
"sklearn.model_selection.train_test_split",
"sklearn.svm.SVC",
"sklearn.svm.LinearSVC",
"sklearn.preprocessing.Normalizer",
"sklearn.kernel_approximation.PolynomialCountSketch",
"sklearn.preprocessing.MinMaxScaler"
],
[
"numpy.dot",
"sklearn.linear_model.lars_path_gram",
"numpy.linspace",
"numpy.asarray",
"sklearn.linear_model.lars_path",
"sklearn.linear_model.lasso_path",
"sklearn.datasets.make_regression",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.scatter",
"numpy.min",
"matplotlib.pyplot.title",
"numpy.arange",
"sklearn.kernel_approximation.Nystroem",
"sklearn.kernel_approximation.RBFSampler",
"matplotlib.pyplot.subplot",
"sklearn.datasets.load_digits",
"sklearn.svm.SVC",
"sklearn.svm.LinearSVC",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.rcParams.update",
"sklearn.decomposition.PCA",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"numpy.random.randn",
"numpy.random.randint",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"sklearn.linear_model.lars_path_gram",
"numpy.linspace",
"numpy.asarray",
"sklearn.linear_model.lars_path",
"sklearn.linear_model.lasso_path",
"sklearn.datasets.make_regression",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"sklearn.linear_model.lars_path_gram",
"numpy.linspace",
"numpy.asarray",
"sklearn.linear_model.lars_path",
"sklearn.linear_model.lasso_path",
"sklearn.datasets.make_regression",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.yticks",
"sklearn.linear_model.LogisticRegression",
"matplotlib.pyplot.scatter",
"numpy.arange",
"sklearn.datasets.load_iris",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.pcolormesh",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"numpy.random.randn",
"numpy.random.randint",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"numpy.random.randn",
"numpy.random.randint",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"sklearn.linear_model.lars_path_gram",
"numpy.linspace",
"numpy.asarray",
"sklearn.linear_model.lars_path",
"sklearn.linear_model.lasso_path",
"sklearn.datasets.make_regression",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
],
[
"matplotlib.pyplot.legend",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"numpy.random.randn",
"numpy.random.randint",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
],
[
"sklearn.datasets.load_breast_cancer",
"numpy.unique",
"sklearn.utils.shuffle",
"numpy.arange",
"sklearn.model_selection.StratifiedKFold",
"numpy.max",
"numpy.append",
"matplotlib.pyplot.subplot",
"sklearn.semi_supervised.SelfTrainingClassifier",
"sklearn.svm.SVC",
"numpy.empty",
"sklearn.metrics.accuracy_score"
],
[
"matplotlib.pyplot.legend",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"numpy.random.randn",
"numpy.random.randint",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
],
[
"numpy.logical_not",
"matplotlib.pyplot.legend",
"numpy.random.seed",
"matplotlib.pyplot.scatter",
"sklearn.linear_model.RANSACRegressor",
"matplotlib.pyplot.plot",
"sklearn.datasets.make_regression",
"numpy.random.normal",
"sklearn.linear_model.LinearRegression",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.legend",
"sklearn.tree.DecisionTreeRegressor",
"matplotlib.pyplot.title",
"numpy.arange",
"matplotlib.pyplot.plot",
"sklearn.tree.DecisionTreeClassifier",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.ylabel",
"numpy.random.randn",
"numpy.random.randint",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
],
[
"numpy.dot",
"sklearn.linear_model.lars_path_gram",
"numpy.linspace",
"numpy.asarray",
"sklearn.linear_model.lars_path",
"sklearn.linear_model.lasso_path",
"sklearn.datasets.make_regression",
"numpy.meshgrid",
"matplotlib.pyplot.figure"
],
[
"numpy.vander",
"matplotlib.pyplot.tight_layout",
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.sin",
"sklearn.linear_model.BayesianRidge",
"numpy.var",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bagustris/calfem-python | [
"f5946c7d822ec70d6420a36d197c41ad263d05a0",
"f5946c7d822ec70d6420a36d197c41ad263d05a0"
] | [
"calfem/vis_mpl.py",
"calfem/mesh.py"
] | [
"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.collections\nimport matplotlib.path as mpp\nimport matplotlib.patches as patches\nimport matplotlib as mpl\nimport matplotlib.tri as tri\n\ntry:\n from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\n from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar\nexcept:\n print(\"Could not import Matplotlib backends. Probarbly due to missing Qt.\")\n\nfrom numpy import sin, cos, pi\nfrom math import atan2\n\nimport logging as cflog\n\n\ndef error(msg):\n \"\"\"Log error message\"\"\"\n cflog.error(msg)\n\n\ndef info(msg):\n \"\"\"Log information message\"\"\"\n cflog.info(msg)\n\n\ndef figure_class():\n \"\"\"Return visvis Figure class.\"\"\"\n return None\n\n\nfigureClass = figure_class\n\ncfv_def_mappable = None\n\n\ndef set_mappable(mappable):\n global cfv_def_mappable\n cfv_def_mappable = mappable\n\n\ndef colorbar(**kwargs):\n \"\"\"Add a colorbar to current figure\"\"\"\n global cfv_def_mappable\n if cfv_def_mappable != None:\n cbar = plt.colorbar(mappable=cfv_def_mappable, ax=plt.gca(), **kwargs)\n cfv_def_mappable = None\n return cbar\n else:\n return plt.colorbar(**kwargs)\n\n\ndef figure(figure=None, show=True, fig_size=(4, 3)):\n \"\"\"Create a visvis figure with extras.\"\"\"\n f = None\n\n if figure == None:\n f = plt.figure(figsize=fig_size)\n else:\n try:\n f = plt.figure(figure)\n except:\n f = plt.figure(figsize=fig_size)\n\n return f\n\n\ndef figure_widget(fig, parent=None):\n widget = FigureCanvas(fig)\n if parent != None:\n widget.setParent(parent)\n toolbar = NavigationToolbar(widget, widget)\n return widget\n\n\ndef close_all():\n \"\"\"Close all visvis windows.\"\"\"\n plt.close('all')\n\n\ncloseAll = close_all\n\n\ndef clf():\n \"\"\"Clear visvis figure\"\"\"\n plt.clf()\n\n\ndef gca():\n \"\"\"Get current axis of the current visvis figure.\"\"\"\n return plt.gca()\n\n\ndef gcf():\n return plt.gcf()\n\n\ndef subplot(*args):\n \"\"\"Create a visvis subplot.\"\"\"\n return plt.subplot(*args)\n\n\ndef camera3d():\n \"\"\"Get visvis 3D camera.\"\"\"\n return None\n\n\ndef show_and_wait():\n \"\"\"Wait for plot to show\"\"\"\n plt.show()\n\n\nshowAndWait = show_and_wait\n\n\ndef show_and_wait_mpl():\n \"\"\"Wait for plot to show\"\"\"\n plt.show()\n\n\nshowAndWaitMpl = show_and_wait_mpl\n\n\ndef set_figure_dpi(dpi):\n mpl.rcParams['figure.dpi'] = dpi\n\n\ndef text(text, pos, angle=0, **kwargs):\n return plt.text(pos[0], pos[1], text, **kwargs)\n\n\nadd_text = text\naddText = text\nlabel = text\n\n\ndef ce2vf(coords, edof, dofs_per_node, el_type):\n '''Duplicate code. Extracts verts, faces and verticesPerFace from input.'''\n\n if np.shape(coords)[1] == 2:\n is_3d = False\n # pad with zeros to make 3D\n verts = np.hstack((coords, np.zeros([np.shape(coords)[0], 1])))\n elif np.shape(coords)[1] == 3:\n is_3d = True\n verts = coords\n else:\n raise ValueError('coords must be N-by-2 or N-by-3 array')\n\n if el_type in [2, 4]: # elements with triangular faces\n vertices_per_face = 3\n elif el_type in [3, 5, 16]: # elements with rectangular faces\n vertices_per_face = 4\n else: # [NOTE] This covers all element types available in CALFEM plus tetrahedrons. 
If more element types are added it is necessary to include them here and below.\n raise ValueError('element type not implemented')\n\n faces = (edof[:, 0::dofs_per_node]-1)/dofs_per_node\n # 'faces' here are actually lists of nodes in elements, not in faces necessarily if the elements are in 3D. This case is handled below.\n\n if el_type in [4, 5]: # if hexahedrons or tetrahedrons:\n if el_type == 5:\n G = np.array([[0, 3, 2, 1],\n [0, 1, 5, 4],\n [4, 5, 6, 7],\n [2, 6, 5, 1],\n [2, 3, 7, 6],\n [0, 4, 7, 3]]) # G is an array that is used to decomposes hexahedrons into its component faces.\n # The numbers are from the node orders (see p94 in the Gmsh manual) and each row makes one face.\n elif el_type == 4:\n G = np.array([[0, 1, 2],\n [0, 3, 2],\n [1, 3, 2],\n [0, 3, 1]]) # This G decomposes tetrahedrons into faces\n faces = np.vstack([faces[i, G] for i in range(faces.shape[0])])\n elif el_type == 16: # if 8-node-quads:\n # The first 4 nodes are the corners of the high order quad.\n faces = faces[:, 0:4]\n\n return verts, np.asarray(faces, dtype=int), vertices_per_face, is_3d\n\n\ndef draw_mesh(coords, edof, dofs_per_node, el_type, title=None, color=(0, 0, 0), face_color=(0.8, 0.8, 0.8), node_color=(0, 0, 0), filled=False, show_nodes=False):\n '''\n Draws wire mesh of model in 2D or 3D. Returns the Mesh object that represents\n the mesh.\n Args:\n coords:\n An N-by-2 or N-by-3 array. Row i contains the x,y,z coordinates of node i.\n edof:\n An E-by-L array. Element topology. (E is the number of elements and L is the number of dofs per element)\n dofs_per_nodes:\n Integer. Dofs per node.\n el_type:\n Integer. Element Type. See Gmsh manual for details. Usually 2 for triangles or 3 for quadrangles.\n axes:\n Matplotlib Axes. The Axes where the model will be drawn. If unspecified the current Axes will be used, or a new Axes will be created if none exist.\n axes_adjust:\n Boolean. True if the view should be changed to show the whole model. Default True.\n title:\n String. Changes title of the figure. Default \"Mesh\".\n color: \n 3-tuple or char. Color of the wire. Defaults to black (0,0,0). Can also be given as a character in 'rgbycmkw'.\n face_color:\n 3-tuple or char. Color of the faces. Defaults to white (1,1,1). Parameter filled must be True or faces will not be drawn at all.\n filled:\n Boolean. Faces will be drawn if True. Otherwise only the wire is drawn. Default False.\n '''\n\n verts, faces, vertices_per_face, is_3d = ce2vf(\n coords, edof, dofs_per_node, el_type)\n\n y = verts[:, 0]\n z = verts[:, 1]\n\n values = np.zeros(faces.shape[0], float)\n\n def quatplot(y, z, quatrangles, values=[], ax=None, **kwargs):\n\n if not ax:\n ax = plt.gca()\n yz = np.c_[y, z]\n v = yz[quatrangles]\n if filled:\n pc = matplotlib.collections.PolyCollection(\n v, facecolor=face_color, **kwargs)\n else:\n pc = matplotlib.collections.PolyCollection(\n v, facecolor='none', **kwargs)\n\n ax.add_collection(pc)\n ax.autoscale()\n return pc\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n pc = quatplot(y, z, faces, values, ax=ax, edgecolor=color)\n\n if show_nodes:\n ax.plot(y, z, marker=\"o\", ls=\"\", color=node_color)\n\n if title != None:\n ax.set(title=title)\n\n\ndrawMesh = draw_mesh\n\n\ndef draw_element_values(values, coords, edof, dofs_per_node, el_type, displacements=None, draw_elements=True, draw_undisplaced_mesh=False, magnfac=1.0, title=None, color=(0, 0, 0), node_color=(0, 0, 0)):\n '''\n Draws scalar element values in 2D or 3D. \n\n Args:\n ev: \n An N-by-1 array or a list of scalars. 
The Scalar values of the elements. ev[i] should be the value of element i.\n \n coords:\n An N-by-2 or N-by-3 array. Row i contains the x,y,z coordinates of node i.\n\n edof:\n An E-by-L array. Element topology. (E is the number of elements and L is the number of dofs per element)\n\n dofs_per_node:\n Integer. Dofs per node.\n\n el_type: \n Integer. Element Type. See Gmsh manual for details. Usually 2 for triangles or 3 for quadrangles.\n \n displacements:\n An N-by-2 or N-by-3 array. Row i contains the x,y,z displacements of node i.\n \n draw_mesh:\n Boolean. True if mesh wire should be drawn. Default True.\n\n draw_undisplaced_mesh: \n Boolean. True if the wire of the undisplaced mesh should be drawn on top of the displaced mesh. Default False. Use only if displacements != None.\n\n magnfac: \n Float. Magnification factor. Displacements are multiplied by this value. Use this to make small displacements more visible.\n\n title: \n String. Changes title of the figure. Default \"Element Values\".\n '''\n\n if draw_undisplaced_mesh:\n draw_mesh(coords, edof, dofs_per_node, el_type, color=(0.5, 0.5, 0.5))\n\n if displacements is not None:\n if displacements.shape[1] != coords.shape[1]:\n displacements = np.reshape(displacements, (-1, coords.shape[1]))\n coords = np.asarray(coords + magnfac * displacements)\n\n verts, faces, vertices_per_face, is_3d = ce2vf(\n coords, edof, dofs_per_node, el_type)\n\n y = verts[:, 0]\n z = verts[:, 1]\n\n def quatplot(y, z, quatrangles, values=[], ax=None, **kwargs):\n\n if not ax:\n ax = plt.gca()\n yz = np.c_[y, z]\n v = yz[quatrangles]\n pc = matplotlib.collections.PolyCollection(\n v, **kwargs)\n\n pc.set_array(np.asarray(values))\n ax.add_collection(pc)\n ax.autoscale()\n return pc\n\n fig = plt.gcf()\n ax = plt.gca()\n ax.set_aspect('equal')\n\n if draw_elements:\n pc = quatplot(y, z, faces, values, ax=ax,\n edgecolor=color)\n else:\n pc = quatplot(y, z, faces, values, ax=ax,\n edgecolor=None)\n\n # pc = quatplot(y,z, np.asarray(edof-1), values, ax=ax,\n # edgecolor=\"crimson\", cmap=\"rainbow\")\n\n set_mappable(pc)\n\n if title != None:\n ax.set(title=title)\n\n\ndef draw_displacements(a, coords, edof, dofs_per_node, el_type, draw_undisplaced_mesh=False, magnfac=-1.0, magscale=0.25, title=None, color=(0, 0, 0), node_color=(0, 0, 0)):\n '''\n Draws scalar element values in 2D or 3D. Returns the world object\n elementsWobject that represents the mesh.\n\n Args:\n ev: \n An N-by-1 array or a list of scalars. The Scalar values of the elements. ev[i] should be the value of element i.\n coords: \n An N-by-2 or N-by-3 array. Row i contains the x,y,z coordinates of node i.\n edof: \n An E-by-L array. Element topology. (E is the number of elements and L is the number of dofs per element)\n dofs_per_node: \n Integer. Dofs per node.\n el_type: \n Integer. Element Type. See Gmsh manual for details. Usually 2 for triangles or 3 for quadrangles.\n displacements: \n An N-by-2 or N-by-3 array. Row i contains the x,y,z displacements of node i.\n axes: \n Matlotlib Axes. The Axes where the model will be drawn. If unspecified the current Axes will be used, or a new Axes will be created if none exist.\n draw_undisplaced_mesh:\n Boolean. True if the wire of the undisplaced mesh should be drawn on top of the displaced mesh. Default False. Use only if displacements != None.\n magnfac: \n Float. Magnification factor. Displacements are multiplied by this value. Use this to make small displacements more visible.\n title: \n String. Changes title of the figure. 
Default \"Element Values\".\n '''\n\n if draw_undisplaced_mesh:\n draw_mesh(coords, edof, dofs_per_node, el_type, color=(0.8, 0.8, 0.8))\n\n if a is not None:\n if a.shape[1] != coords.shape[1]:\n a = np.reshape(a, (-1, coords.shape[1]))\n\n x_max = np.max(coords[:, 0])\n x_min = np.min(coords[:, 0])\n\n y_max = np.max(coords[:, 1])\n y_min = np.min(coords[:, 1])\n\n x_size = x_max - x_min\n y_size = y_max - y_min\n\n if x_size > y_size:\n max_size = x_size\n else:\n max_size = y_size\n\n if magnfac < 0:\n magnfac = 0.25*max_size\n\n coords = np.asarray(coords + magnfac * a)\n\n verts, faces, vertices_per_face, is_3d = ce2vf(\n coords, edof, dofs_per_node, el_type)\n\n y = verts[:, 0]\n z = verts[:, 1]\n\n values = []\n\n def quatplot(y, z, quatrangles, values=[], ax=None, **kwargs):\n\n if not ax:\n ax = plt.gca()\n yz = np.c_[y, z]\n v = yz[quatrangles]\n pc = matplotlib.collections.PolyCollection(\n v, **kwargs)\n\n ax.add_collection(pc)\n ax.autoscale()\n return pc\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n pc = quatplot(y, z, faces, values, ax=ax, edgecolor=(\n 0.3, 0.3, 0.3), facecolor='none')\n\n if title != None:\n ax.set(title=title)\n\n\ndef create_ordered_polys(geom, N=10):\n \"\"\"Creates ordered polygons from the geometry definition\"\"\"\n\n N = 10\n\n o_polys = []\n\n for (id, (surf_name, curve_ids, holes, _, _, _)) in geom.surfaces.items():\n\n polygon = np.empty((0, 3), float)\n\n polys = []\n\n for curve_id in curve_ids:\n\n curve_name, curve_points, _, _, _, _ = geom.curves[curve_id]\n points = geom.get_point_coords(curve_points)\n\n if curve_name == \"Spline\":\n P = _catmullspline(points, N)\n if curve_name == \"BSpline\":\n P = _bspline(points, N)\n if curve_name == \"Circle\":\n P = _circleArc(*points, pointsOnCurve=N)\n if curve_name == \"Ellipse\":\n P = _ellipseArc(*points, pointsOnCurve=N)\n\n polys.append(P)\n\n ordered_polys = []\n\n ordered_polys.append(polys.pop())\n\n while len(polys) != 0:\n p0 = ordered_polys[-1]\n for p in polys:\n if np.allclose(p0[-1], p[0]):\n ordered_polys.append(polys.pop())\n break\n elif np.allclose(p0[-1], p[-1]):\n ordered_polys.append(np.flipud(polys.pop()))\n break\n\n for p in ordered_polys:\n polygon = np.concatenate((polygon, p))\n\n o_polys.append(polygon)\n\n return o_polys\n\n\ndef draw_ordered_polys(o_polys):\n\n for poly in o_polys:\n\n ax = plt.gca()\n path = mpp.Path(poly[:, 0:2])\n patch = patches.PathPatch(path, facecolor='orange', lw=1)\n ax.add_patch(patch)\n\n\ndef point_in_geometry(o_polys, point):\n\n for poly in o_polys:\n\n path = mpp.Path(poly[:, 0:2])\n inside = path.contains_points([point])\n\n if inside:\n return True\n\n return False\n\n\ndef topo_to_tri(edof):\n \"\"\"Converts 2d element topology to triangle topology to be used\n with the matplotlib functions tricontour and tripcolor.\"\"\"\n\n if edof.shape[1] == 3:\n return edof\n elif edof.shape[1] == 4:\n new_edof = np.zeros((edof.shape[0]*2, 3), int)\n new_edof[0::2, 0] = edof[:, 0]\n new_edof[0::2, 1] = edof[:, 1]\n new_edof[0::2, 2] = edof[:, 2]\n new_edof[1::2, 0] = edof[:, 2]\n new_edof[1::2, 1] = edof[:, 3]\n new_edof[1::2, 2] = edof[:, 0]\n return new_edof\n elif edof.shape[1] == 8:\n new_edof = np.zeros((edof.shape[0]*6, 3), int)\n new_edof[0::6, 0] = edof[:, 0]\n new_edof[0::6, 1] = edof[:, 4]\n new_edof[0::6, 2] = edof[:, 7]\n new_edof[1::6, 0] = edof[:, 4]\n new_edof[1::6, 1] = edof[:, 1]\n new_edof[1::6, 2] = edof[:, 5]\n new_edof[2::6, 0] = edof[:, 5]\n new_edof[2::6, 1] = edof[:, 2]\n new_edof[2::6, 2] = edof[:, 6]\n 
new_edof[3::6, 0] = edof[:, 6]\n new_edof[3::6, 1] = edof[:, 3]\n new_edof[3::6, 2] = edof[:, 7]\n new_edof[4::6, 0] = edof[:, 4]\n new_edof[4::6, 1] = edof[:, 6]\n new_edof[4::6, 2] = edof[:, 7]\n new_edof[5::6, 0] = edof[:, 4]\n new_edof[5::6, 1] = edof[:, 5]\n new_edof[5::6, 2] = edof[:, 6]\n return new_edof\n else:\n error(\"Element topology not supported.\")\n\n\ndef draw_nodal_values_contourf(values, coords, edof, levels=12, title=None, dofs_per_node=None, el_type=None, draw_elements=False):\n \"\"\"Draws element nodal values as filled contours. Element topologies\n supported are triangles, 4-node quads and 8-node quads.\"\"\"\n\n edof_tri = topo_to_tri(edof)\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n x, y = coords.T\n v = np.asarray(values)\n plt.tricontourf(x, y, edof_tri - 1, v.ravel(), levels)\n\n if draw_elements:\n if dofs_per_node != None and el_type != None:\n draw_mesh(coords, edof, dofs_per_node,\n el_type, color=(0.2, 0.2, 0.2))\n else:\n info(\"dofs_per_node and el_type must be specified to draw the mesh.\")\n\n if title != None:\n ax.set(title=title)\n\n\ndef draw_nodal_values_contour(values, coords, edof, levels=12, title=None, dofs_per_node=None, el_type=None, draw_elements=False):\n \"\"\"Draws element nodal values as filled contours. Element topologies\n supported are triangles, 4-node quads and 8-node quads.\"\"\"\n\n edof_tri = topo_to_tri(edof)\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n x, y = coords.T\n v = np.asarray(values)\n plt.tricontour(x, y, edof_tri - 1, v.ravel(), levels)\n\n if draw_elements:\n if dofs_per_node != None and el_type != None:\n draw_mesh(coords, edof, dofs_per_node,\n el_type, color=(0.2, 0.2, 0.2))\n else:\n info(\"dofs_per_node and el_type must be specified to draw the mesh.\")\n\n if title != None:\n ax.set(title=title)\n\n\ndef draw_nodal_values_shaded(values, coords, edof, title=None, dofs_per_node=None, el_type=None, draw_elements=False):\n \"\"\"Draws element nodal values as shaded triangles. Element topologies\n supported are triangles, 4-node quads and 8-node quads.\"\"\"\n\n edof_tri = topo_to_tri(edof)\n\n ax = plt.gca()\n ax.set_aspect('equal')\n\n x, y = coords.T\n v = np.asarray(values)\n plt.tripcolor(x, y, edof_tri - 1, v.ravel(), shading=\"gouraud\")\n\n if draw_elements:\n if dofs_per_node != None and el_type != None:\n draw_mesh(coords, edof, dofs_per_node,\n el_type, color=(0.2, 0.2, 0.2))\n else:\n info(\"dofs_per_node and el_type must be specified to draw the mesh.\")\n\n if title != None:\n ax.set(title=title)\n\n\ndraw_nodal_values = draw_nodal_values_contourf\n\n\ndef draw_geometry(geometry, draw_points=True, label_points=True, label_curves=True, title=None, font_size=11, N=20, rel_margin=0.05, draw_axis=False):\n '''\n Draws the geometry (points and curves) in geoData\n Args:\n geoData:\n GeoData object. Geodata contains geometric information of the model.\n axes:\n Matplotlib Axes. The Axes where the model will be drawn. If unspecified the current Axes will be used, or a new Axes will be created if none exist.\n axes_adjust:\n Boolean. If True the view will be changed to show the whole model. Default True.\n draw_points: \n Boolean. If True points will be drawn.\n label_points:\n Boolean. If True Points will be labeled. The format is: ID[marker]. If a point has marker==0 only the ID is written.\n label_curves:\n Boolean. If True Curves will be labeled. The format is: ID(elementsOnCurve)[marker].\n font_size:\n Integer. Size of the text in the text labels. Default 11.\n N:\n Integer. 
The number of discrete points per curve segment. Default 20. Increase for smoother curves. Decrease for better performance.\n rel_margin:\n Extra spacing between geometry and axis\n '''\n\n ax = plt.gca()\n ax.set_aspect('equal')\n ax.set_frame_on(draw_axis)\n\n if draw_points:\n P = np.array(geometry.getPointCoords()) # M-by-3 list of M points.\n #plotArgs = {'mc':'r', 'mw':5, 'lw':0, 'ms':'o', 'axesAdjust':False, 'axes':axes}\n plotArgs = {\"marker\": \"o\", \"ls\": \"\"}\n if geometry.is3D:\n plt.plot(P[:, 0], P[:, 1], P[:, 2], **plotArgs)\n else:\n plt.plot(P[:, 0], P[:, 1], **plotArgs)\n\n if label_points: # Write text label at the points:\n # [[x, y, z], elSize, marker]\n for (ID, (xyz, el_size, marker)) in geometry.points.items():\n text = \" \" + str(ID) + (\"[%s]\" %\n marker if marker != 0 else '')\n plt.text(xyz[0], xyz[1], text,\n fontsize=font_size, color=(0.5, 0, 0.5))\n\n for (ID, (curveName, pointIDs, marker, elementsOnCurve, _, _)) in geometry.curves.items():\n points = geometry.getPointCoords(pointIDs)\n if curveName == \"Spline\":\n P = _catmullspline(points, N)\n if curveName == \"BSpline\":\n P = _bspline(points, N)\n if curveName == \"Circle\":\n P = _circleArc(*points, pointsOnCurve=N)\n if curveName == \"Ellipse\":\n P = _ellipseArc(*points, pointsOnCurve=N)\n # plotArgs = {'lc':'k', 'ms':None, 'axesAdjust':False, 'axes':axes} #Args for plot style. Black lines with no symbols at points.\n\n # Args for plot style. Black lines with no symbols at points.\n plotArgs = {\"color\": \"black\"}\n\n if geometry.is3D:\n plt.plot(P[:, 0], P[:, 1], P[:, 2], **plotArgs)\n else:\n plt.plot(P[:, 0], P[:, 1], **plotArgs)\n\n if label_curves:\n # Sort of midpoint along the curve. Where the text goes.\n midP = P[int(P.shape[0]*7.0/12), :].tolist()\n # Create the text for the curve. Includes ID, elementsOnCurve, and marker:\n text = \" \"+str(ID)\n text += \"(%s)\" % (elementsOnCurve) if elementsOnCurve is not None else ''\n # Something like \"4(5)[8]\"\n text += \"[%s]\" % (marker) if marker != 0 else ''\n plt.text(midP[0], midP[1], text, fontsize=font_size)\n\n if title != None:\n plt.title(title)\n\n min_x, max_x, min_y, max_y = geometry.bounding_box_2d()\n\n g_width = max_x - min_x\n g_height = max_y - min_y\n\n if g_width > g_height:\n margin = rel_margin*g_width\n else:\n margin = rel_margin*g_height\n\n bottom, top = ax.get_ylim()\n left, right = ax.get_xlim()\n ax.set_ylim(bottom-margin, top+margin)\n ax.set_xlim(left-margin, right+margin)\n\n # if axesAdjust:\n # _adjustaxes(axes, geoData.is3D)\n #axes.daspectAuto = False\n #axes.daspect = (1,1,1)\n\n# drawGeometry = draw_geometry\n\n\ndef _catmullspline(controlPoints, pointsOnEachSegment=10):\n \"\"\"\n Returns points on a Catmull-Rom spline that interpolates the control points.\n Initial/end tangents are created by mirroring the second/second-to-last\n control points in the first/last points.\n\n Params:\n controlPoints - Numpy array containing the control points of the spline.\n Each row should contain the x,y,(z) values.\n [[x1, y1],\n [x2, y2],\n ...\n [xn, yn]]\n\n pointsOnEachSegment - The number of points on each segment of the curve.\n If there are n control points and k points per segment,\n then there will be (n-1)*k points on the curve.\n \"\"\"\n controlPoints = np.asarray(\n controlPoints) # Convert to array if input is a list.\n if (controlPoints[0, :] == controlPoints[-1, :]).all():\n # If the curve is closed we extend each opposite endpoint to the other side\n CPs = np.asmatrix(np.vstack((controlPoints[-2, :],\n controlPoints,\n controlPoints[1, :])))\n else: # Else make mirrored endpoints:\n CPs = np.asmatrix(np.vstack((2*controlPoints[0, :] - controlPoints[1, :],\n controlPoints,\n 2*controlPoints[-1, :] - controlPoints[-2, :])))\n M = 0.5 * np.matrix([[0, 2, 0, 0], [-1, 0, 1, 0],\n [2, -5, 4, -1], [-1, 3, -3, 1]])\n t = np.linspace(0, 1, pointsOnEachSegment)\n T = np.matrix([[1, s, pow(s, 2), pow(s, 3)] for s in t])\n return np.asarray(np.vstack([T * M * CPs[j-1:j+3, :] for j in range(1, len(CPs)-2)]))\n\n\ndef _bspline(controlPoints, pointsOnCurve=20):\n '''\n Uniform cubic B-spline.\n\n Params:\n controlPoints - Control points. Numpy array.
One coordinate per row.\n pointsOnCurve - number of sub points per segment\n\n Mirrored start- and end-points are added if the curve is not closed.\n If the curve is closed some points are duplicated to make the closed\n spline continuous.\n (See http://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-curve-closed.html)\n\n Based on descriptions on:\n http://www.siggraph.org/education/materials/HyperGraph/modeling/splines/b_spline.htm\n http://en.wikipedia.org/wiki/B-spline#Uniform_cubic_B-splines\n '''\n controlPoints = np.asarray(\n controlPoints) # Convert to array if input is a list.\n if (controlPoints[0, :] == controlPoints[-1, :]).all():\n # If the curve is closed we extend each opposite endpoint to the other side\n CPs = np.asmatrix(np.vstack((controlPoints[-2, :],\n controlPoints,\n controlPoints[1, :])))\n else: # Else make mirrored endpoints:\n CPs = np.asmatrix(np.vstack((2*controlPoints[0, :] - controlPoints[1, :],\n controlPoints,\n 2*controlPoints[-1, :] - controlPoints[-2, :])))\n M = (1.0/6) * np.matrix([[-1, 3, -3, 1],\n [3, -6, 3, 0],\n [-3, 0, 3, 0],\n [1, 4, 1, 0]])\n t = np.linspace(0, 1, pointsOnCurve)\n T = np.matrix([[pow(s, 3), pow(s, 2), s, 1] for s in t])\n\n return np.asarray(np.vstack([T * M * CPs[i-1: i+3, :] for i in range(1, len(CPs)-2)]))\n\n\ndef _circleArc(start, center, end, pointsOnCurve=20):\n return _ellipseArc(start, center, start, end, pointsOnCurve)\n\n\ndef _ellipseArc(start, center, majAxP, end, pointsOnCurve=20):\n '''Input are 3D 1-by-3 numpy arrays or vectors'''\n # First part is to find a similarity transform in 3D that transform the ellipse to\n # the XY-plane with the center at the origin and the major axis of the ellipse along the X-axis.\n\n # convert to arrays in case inputs are lists:\n start, center, majAxP, end, = np.asarray(start), np.asarray(\n center), np.asarray(majAxP), np.asarray(end)\n\n zPrim = np.cross(start-center, end-center)\n zPrim = zPrim / np.linalg.norm(zPrim)\n xPrim = (majAxP-center) / np.linalg.norm(majAxP-center)\n yPrim = np.cross(zPrim, xPrim)\n\n # Rotation matrix from ordinary coords to system where ellipse is in the XY-plane. (Actually hstack)\n R = np.vstack((xPrim, yPrim, zPrim)).T\n # Building Transformation matrix. -center is translation vector from ellipse center to origin.\n T = np.hstack((R, np.asmatrix(center).T))\n # Transformation matrix for homogenous coordinates.\n T = np.mat(np.vstack((T, [0, 0, 0, 1])))\n\n startHC = np.vstack((np.matrix(start).T, [1]))\n # start and end points as column vectors in homogenous coordinates\n endHC = np.vstack((np.matrix(end).T, [1]))\n\n s = np.linalg.inv(T) * startHC\n # start and end points in the new coordinate system\n e = np.linalg.inv(T) * endHC\n\n xs, ys = s[0, 0], s[1, 0]\n # Just extract x & y from the new start and endpoints\n xe, ye = e[0, 0], e[1, 0]\n\n a = np.sqrt((pow(ye*xs, 2) - pow(xe*ys, 2)) / (pow(ye, 2) - pow(ys, 2)))\n b = np.sqrt((pow(ye*xs, 2) - pow(xe*ys, 2)) / ((pow(ye, 2) - pow(ys, 2))\n * ((pow(xe, 2) - pow(xs, 2)) / (pow(ys, 2) - pow(ye, 2)))))\n\n # atan2 is a function that goes from -pi to pi. 
It gives the signed angle from the X-axis to point (y,x)\n ts = atan2(ys/b, xs/a)\n # We can't use the (transformed) start- and endpoints directly, but we divide x and y by the\n te = atan2(ye/b, xe/a)\n # ellipse minor&major axes to get the parameter t that corresponds to the point on the ellipse.\n # See ellipse formula: x = a * cos (t), y = b * sin(t).\n # So ts and te are the parameter values of the start- and endpoints (in the transformed coordinate system).\n\n if ts > te:\n # swap so that ts <= te, i.e. so the parameter increases from the start point to the end point of the arc.\n ts, te = te, ts\n if te - ts < np.pi:\n # parameter of ellipse curve. NOT angle to point on curve (like it could be for a circle).\n times = np.linspace(ts, te, pointsOnCurve)\n # else the shortest parameter distance between start- and end-point straddles the discontinuity that jumps from pi to -pi.\n else:\n # number of points on the first length.\n ps1 = round(pointsOnCurve * (pi-te)/(2*pi-te+ts))\n # number of points on the second length.\n ps2 = round(pointsOnCurve * (ts+pi)/(2*pi-te+ts))\n times = np.concatenate(\n (np.linspace(te, pi, ps1), np.linspace(-pi, ts, ps2)))\n\n ellArc = np.array([[a*cos(t), b*sin(t)]\n for t in times]).T # points on arc (in 2D)\n # Make 3D homogeneous coords by adding rows of 0s and 1s.\n ellArc = np.vstack(\n (ellArc, np.repeat(np.matrix([[0], [1]]), ellArc.shape[1], 1)))\n ellArc = T * ellArc # Transform back to the original coordinate system\n return np.asarray(ellArc.T[:, 0:3]) # return points as an N-by-3 array.\n\n\ndef eldraw2(ex, ey, plotpar=[1, 2, 1], elnum=[]):\n \"\"\"\n eldraw2(ex,ey,plotpar,elnum)\n eldraw2(ex,ey,plotpar)\n eldraw2(ex,ey)\n\n PURPOSE\n Draw the undeformed 2D mesh for a number of elements of\n the same type. Supported elements are:\n\n 1) -> bar element 2) -> beam el.\n 3) -> triangular 3 node el. 4) -> quadrilateral 4 node el.\n 5) -> 8-node isopar. element\n\n INPUT\n ex,ey:.......... nen: number of element nodes\n nel: number of elements\n plotpar=[ linetype, linecolor, nodemark]\n\n linetype=1 -> solid linecolor=1 -> black\n 2 -> dashed 2 -> blue\n 3 -> dotted 3 -> magenta\n 4 -> red\n\n nodemark=1 -> circle\n 2 -> star\n 0 -> no mark\n\n elnum=edof(:,1) ; i.e. the first column in the topology matrix\n\n Rem. Default is solid white lines with circles at nodes.\n \"\"\"\n\n line_type = plotpar[0]\n line_color = plotpar[1]\n node_mark = plotpar[2]\n\n # Translate CALFEM plotpar to visvis\n\n vv_line_type = '-'\n vv_line_color = 'b'\n vv_node_mark = 'o'\n\n if line_type == 1:\n vv_line_type = '-'\n elif line_type == 2:\n vv_line_type = '--'\n elif line_type == 3:\n vv_line_type = ':'\n\n if line_color == 1:\n vv_line_color = 'k'\n elif line_color == 2:\n vv_line_color = 'b'\n elif line_color == 3:\n vv_line_color = 'm'\n elif line_color == 4:\n vv_line_color = 'r'\n\n if node_mark == 1:\n vv_node_mark = 'o'\n elif node_mark == 2:\n vv_node_mark = 'x'\n elif node_mark == 0:\n vv_node_mark = ''\n\n vv_marker_color = vv_line_color\n\n plt.axis('equal')\n\n draw_element_numbers = False\n\n if len(elnum) == ex.shape[0]:\n draw_element_numbers = True\n\n i = 0\n\n for elx, ely in zip(ex, ey):\n x = elx.tolist()\n x.append(elx[0])\n y = ely.tolist()\n y.append(ely[0])\n\n xm = sum(x)/len(x)\n ym = sum(y)/len(y)\n\n plt.plot(x, y, vv_line_color + vv_node_mark + vv_line_type)\n\n\ndef eliso2_mpl(ex, ey, ed):\n\n plt.axis('equal')\n\n print(np.shape(ex))\n print(np.shape(ey))\n print(np.shape(ed))\n\n gx = []\n gy = []\n gz = []\n\n for elx, ely, scl in zip(ex, ey, ed):\n for x in elx:\n gx.append(x)\n for y in ely:\n gy.append(y)\n for z in scl:\n gz.append(z) # contour values come from ed, not the y coordinates\n\n plt.tricontour(gx, gy, gz, 5)\n",
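topo_to_tri in the file above splits each 4-node quad into the two triangles (n0, n1, n2) and (n2, n3, n0) so nodal fields can be handed to matplotlib's tricontour/tripcolor. A standalone sketch of that decomposition (illustrative, using the same node ordering as the file):

import numpy as np
import matplotlib.tri as tri

quad_edof = np.array([[1, 2, 3, 4]])            # one 4-node quad, 1-based node numbers
tris = np.vstack([quad_edof[:, [0, 1, 2]],      # triangle (n0, n1, n2)
                  quad_edof[:, [2, 3, 0]]])     # triangle (n2, n3, n0)
coords = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.]])
triang = tri.Triangulation(coords[:, 0], coords[:, 1], tris - 1)  # 0-based for matplotlib
print(triang.triangles)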
"import os\nimport sys\nimport tempfile\nimport shutil\nimport subprocess\nimport numpy as np\nfrom calfem.core import createdofs\nfrom calfem.utils import which\nimport calfem.core as cfc\n\nimport logging as cflog\n\nimport gmsh\n\ndef error(msg):\n \"\"\"Log error message\"\"\"\n cflog.error(msg)\n\n\ndef info(msg):\n \"\"\"Log information message\"\"\"\n cflog.info(msg)\n\n\ndef cmp(a, b):\n return (a > b) ^ (a < b)\n\n# def dofsFromNodes(listOfNodes, dofs):\n# D = []\n# for node in listOfNodes:\n# D.extend(dofs[node])\n# return D\n\n\ndef _offsetIndices(lst, offset=0):\n '''Shifts the indices by offset. \n Positive offsets move the indices away from 0.\n Negative offsets move the indices towards 0.\n If an index is 0 the offset is added to it.'''\n return [x + cmp(x, -0.5)*offset for x in lst]\n\n\ndef _formatList(lst, offset=0):\n \"\"\"\n Turns a list of numbers into a corresponding string of comma-separated numbers.\n The parameter offset is a number that is added to the numbers.\n Can be used for turning a list of 0-based indices into a corresponding string\n of comma-separated offset indices. Offsets depend on the sign, i.e. negative\n numbers get the offset subtracted.\n Do not use offsets on lists with negative float values. \n \"\"\"\n # Increment the indices by 1. Join list-elements as strings separated by ', '.\n try:\n return ', '.join(map(str, _offsetIndices(lst, offset)))\n except TypeError:\n # If lst is not iterable (causes TypeError), then it is probably an integer.\n return lst+offset\n\n\ndef _insertInSetDict(dictionary, key, values):\n '''inserts values at key in dictionary containing sets. Values may be\n a single value or iterable, in which case each value is inserted'''\n if not key in dictionary:\n dictionary[key] = set()\n try:\n for v in values:\n dictionary[key].add(v)\n # Exception if values is not an iterable - insert values itself instead.\n except TypeError:\n dictionary[key].add(values)\n\n\ndef _insertBoundaryElement(boundaryElements, elementType, marker, nodes):\n \"\"\"\n Insert an element to the boundaryElements dict.\n\n Parameters:\n\n boundaryElements Dictionary of boundary elements\n\n elementType 'elm-type' according to GMSH\n\n marker Boundary marker\n\n nodes List of element nodes, order according to GMSH\n \"\"\"\n if not marker in boundaryElements:\n boundaryElements[marker] = []\n boundaryElements[marker].append(\n {'elm-type': elementType, 'node-number-list': nodes})\n\n\ndef createGmshMesh(geometry, el_type=2, el_size_factor=1, dofs_per_node=1,\n gmsh_exec_path=None, clcurv=False,\n min_size=None, max_size=None, meshing_algorithm=None,\n additional_options=''):\n\n meshGen = GmshMeshGenerator(geometry, el_type, el_size_factor, dofs_per_node,\n gmsh_exec_path, clcurv, min_size, max_size, meshing_algorithm,\n additional_options)\n\n return meshGen.create()\n\n\ncreateMesh = createGmshMesh\ncreate_mesh = createGmshMesh\nmesh = createGmshMesh\n\n\nclass GmshMeshGenerator:\n '''\n Meshes geometry in GeoData objects or geo-files by calling the Gmsh executable.\n This is done when the function create() is called.\n '''\n\n def __init__(self, geometry, el_type=2, el_size_factor=1, dofs_per_node=1,\n gmsh_exec_path=None, clcurv=False,\n min_size=None, max_size=None, meshing_algorithm=None,\n additional_options='', mesh_dir='', return_boundary_elements=False):\n ''' \n Parameters:\n\n geometry GeoData instance or string containing path to .geo-file\n\n el_type Integer. Element type and order. 
\n See gmsh manual for details.\n\n el_size_factor Float. Factor by which the element sizes are multiplied.\n\n dofs_per_node Number of degrees of freedom per node.\n\n gmsh_exec_path File path to where the gmsh executable is located.\n\n clcurv Set to true to make elements smaller at high curvatures. \n (Experimental option according to the gmsh manual)\n\n min_size Minimum element size\n\n max_size Maximum element size\n\n meshing_algorithm String. Select mesh algorithm ('meshadapt', 'del2d',\n 'front2d', 'del3d', 'front3d', ...). \n See the gmsh manual for more info.\n\n return_boundary_elements Flag for returning dictionary with boundary element\n information. Useful for applying loads on boundary.\n\n additional_options String containing additional command line args for gmsh.\n Use this if a gmsh option is not covered by the above \n parameters (See section 3.3 in the gmsh manual for a \n list of options)):\n '''\n self.geometry = geometry\n self.el_type = el_type\n self.el_size_factor = el_size_factor\n self.dofs_per_node = dofs_per_node\n self.gmsh_exec_path = gmsh_exec_path\n self.clcurv = clcurv\n self.min_size = min_size\n self.max_size = max_size\n self.meshing_algorithm = meshing_algorithm\n self.additional_options = additional_options\n self.mesh_dir = mesh_dir\n self.return_boundary_elements = return_boundary_elements\n\n # gmsh elements that have rectangle faces\n self._ElementsWithQuadFaces = [3, 5, 10, 12, 16, 17, 92, 93]\n self._2ndOrderElms = [8, 9, 10, 11, 12,\n 13, 14, 16, 17, 18,\n 19]\n self._2dOrderIncompleteElms = [9, 11, 13, 14,\n 16, 17, 18, 19]\n # Apart from 16 the 2nd orders are totally untested. Only 16 (8-node quad)\n # is implemented in pycalfem though, so it does not matter.\n\n self.use_gmsh_module = True\n self.remove_gmsh_signal_handler = True\n self.initialize_gmsh = True\n\n def create(self, is3D=False):\n '''\n Meshes a surface or volume defined by the geometry in geoData.\n Parameters:\n is3D - Optional parameter that only needs to be set if geometry\n is loaded from a geo-file, i.e. if geoData is a path string.\n Default False.\n\n Returns:\n\n coords Node coordinates\n\n [[n0_x, n0_y, n0_z],\n [ ... ],\n [nn_x, nn_y, nn_z]]\n\n edof Element topology\n\n [[el0_dof1, ..., el0_dofn],\n [ ... ],\n [eln_dof1, ..., eln_dofn]]\n\n dofs Node dofs\n\n [[n0_dof1, ..., n0_dofn],\n [ ... ],\n [nn_dof1, ..., nn_dofn]]\n\n bdofs Boundary dofs. Dictionary containing lists of dofs for\n each boundary marker. Dictionary key = marker id.\n\n elementmarkers List of integer markers. Row i contains the marker of\n element i. Markers are similar to boundary markers and\n can be used to identify in which region an element lies.\n\n boundaryElements (optional) returned if self.return_boundary_elements is true.\n Contains dictionary with boundary elements. The keys are markers\n and the values are lists of elements for that marker.\n\n Running this function also creates object variables:\n\n nodesOnCurve Dictionary containing lists of node-indices. Key is a \n curve-ID and the value is a list of indices of all nodes\n on that curve, including its end points.\n\n nodesOnSurface Dictionary containing lists of node-indices. Key is a\n surface-ID and the value is a list of indices of the nodes\n on that surface, including its boundary.\n\n nodesOnVolume Dictionary containing lists of node-indices. Key is a\n volume-ID and the value is a list of indices of the nodes\n in that volume, including its surface. 
\n '''\n # Nodes per element for different element types:\n # (taken from Chapter 9, page 89 of the gmsh manual)\n nodesPerElmDict = {1: 2, 2: 3, 3: 4, 4: 4, 5: 8,\n 6: 6, 7: 5, 8: 3, 9: 6, 10: 9,\n 11: 10, 12: 27, 13: 18, 14: 14, 15: 1,\n 16: 8, 17: 20, 18: 15, 19: 13, 20: 9,\n 21: 10, 22: 12, 23: 15, 24: 15, 25: 21,\n 26: 4, 27: 5, 28: 6, 29: 20, 30: 35,\n 31: 56, 92: 64, 93: 125}\n nodesPerElement = nodesPerElmDict[self.el_type]\n\n # Check for GMSH executable \n # \n # Consider using the gmsh_extension module\n\n if not self.use_gmsh_module:\n gmshExe = self.gmsh_exec_path\n if gmshExe == None:\n gmshExe = None\n if sys.platform == \"win32\":\n gmshExe = which(\"gmsh.bat\")\n if gmshExe == None:\n gmshExe = which(\"gmsh.exe\")\n else:\n gmshExe = which(\"gmsh\")\n else:\n if not os.path.exists(gmshExe):\n gmshExe = os.path.join(\n os.getcwd(), self.gmsh_exec_path) # Try relative path\n if not os.path.exists(gmshExe):\n gmshExe = None # Relative path didnt work either\n\n if gmshExe == None:\n raise IOError(\n \"Error: Could not find GMSH. Please make sure that the \\GMSH executable is available on the search path (PATH).\")\n else:\n print(\"Info : GMSH -> %s\" % gmshExe)\n else:\n print(\"Info : GMSH -> Python-module\")\n\n # Create a temporary directory for GMSH\n\n oldStyleTempDir = False\n\n if self.mesh_dir != \"\":\n tempMeshDir = self.mesh_dir\n if not os.path.exists(tempMeshDir):\n os.mkdir(tempMeshDir)\n else:\n tempMeshDir = tempfile.mkdtemp()\n\n # If geometry data is given as a .geo file we will just pass it on to gmsh later.\n\n if type(self.geometry) is str:\n geoFilePath = self.geometry\n\n # In this case geoData is a path string, so the dimension must be supplied by the user.\n\n dim = 3 if is3D else 2\n if not os.path.exists(geoFilePath):\n geoFilePath = os.path.join(\n os.getcwd(), geoFilePath) # Try relative path\n if not os.path.exists(geoFilePath):\n raise IOError(\n \"Error: Could not find geo-file \" + geoFilePath)\n else:\n\n # Get the dimension of the model from geoData.\n\n dim = 3 if self.geometry.is3D else 2\n\n if oldStyleTempDir:\n if not os.path.exists(\"./gmshMeshTemp\"):\n os.mkdir(\"./gmshMeshTemp\")\n geoFilePath = os.path.normpath(os.path.join(\n os.getcwd(), \"gmshMeshTemp/tempGeometry.geo\")) # \"gmshMeshTemp/tempGeometry.geo\"\n else:\n geoFilePath = os.path.normpath(\n os.path.join(tempMeshDir, 'tempGeometry.geo'))\n\n with open(geoFilePath, \"w\") as self.geofile:\n self._writeGeoFile() # Write geoData to file\n\n if oldStyleTempDir:\n\n # Filepath to the msh-file that will be generated.\n\n mshFileName = os.path.normpath(os.path.join(\n os.getcwd(), 'gmshMeshTemp/meshFile.msh'))\n else:\n mshFileName = os.path.normpath(\n os.path.join(tempMeshDir, 'meshFile.msh'))\n\n # construct options string:\n\n options = \"\"\n options += ' -' + str(dim)\n options += ' -clscale ' + str(self.el_size_factor) # scale factor\n options += ' -o \\\"%s\\\"' % mshFileName\n options += ' -clcurv' if self.clcurv else ''\n options += ' -clmin ' + \\\n str(self.min_size) if self.min_size is not None else ''\n options += ' -clmax ' + \\\n str(self.max_size) if self.max_size is not None else ''\n options += ' -algo ' + self.meshing_algorithm if self.meshing_algorithm is not None else ''\n options += ' -order 2' if self.el_type in self._2ndOrderElms else ''\n options += ' -format msh22'\n options += ' -v 5'\n options += ' ' + self.additional_options\n\n # Execute gmsh\n\n if self.use_gmsh_module:\n\n # Meshing using gmsh extension module\n\n if self.initialize_gmsh:\n 
gmsh.initialize(sys.argv)\n\n # This is a hack to enable the use of gmsh in \n # a separate thread.\n\n if self.remove_gmsh_signal_handler:\n gmsh.oldsig = None\n\n # Load .geo file\n\n gmsh.open(geoFilePath)\n gmsh.model.geo.synchronize()\n\n # Set meshing options\n\n if self.el_type in self._2ndOrderElms:\n gmsh.option.setNumber(\"Mesh.ElementOrder\", 2)\n \n if self.meshing_algorithm is not None:\n gmsh.option.setString(self.meshing_algorithm)\n \n gmsh.option.setNumber(\"Mesh.MshFileVersion\", 2.2)\n gmsh.option.setNumber(\"Mesh.MeshSizeFactor\", self.el_size_factor)\n\n if self.clcurv is not None:\n gmsh.option.setNumber(\"Mesh.MeshSizeFromCurvature\", self.clcurv)\n\n if self.min_size is not None:\n gmsh.option.setNumber('Mesh.MeshSizeMin', self.min_size)\n if self.max_size is not None:\n gmsh.option.setNumber('Mesh.MeshSizeMax', self.max_size)\n\n # Generate mesh\n\n gmsh.model.mesh.generate(2)\n\n # Write .msh file\n\n gmsh.write(mshFileName)\n\n # Close extension module\n\n if self.initialize_gmsh:\n gmsh.finalize() \n else:\n gmshExe = os.path.normpath(gmshExe)\n info(\"GMSH binary: \"+gmshExe)\n\n output = subprocess.Popen(r'\"%s\" \"%s\" %s' % (\n gmshExe, geoFilePath, options), shell=True, stdout=subprocess.PIPE).stdout.read()\n \n # Read generated msh file:\n # print(\"Opening msh file \" + mshFileName)#TEMP\n\n with open(mshFileName, 'r') as mshFile:\n\n info(\"Mesh file : \"+mshFileName)\n\n # print(\"Reading msh file...\")\n\n ln = mshFile.readline()\n while(ln != '$Nodes\\n'): # Read until we find the nodes\n ln = mshFile.readline()\n nbrNodes = int(mshFile.readline())\n allNodes = np.zeros([nbrNodes, dim], 'd')\n for i in range(nbrNodes):\n line = list(map(float, mshFile.readline().split()))\n\n # Grab the coordinates (1:3 if 2D, 1:4 if 3D)\n\n allNodes[i, :] = line[1:dim+1]\n\n while(mshFile.readline() != '$Elements\\n'): # Read until we find the elements\n pass\n\n # The nbr of elements (including marker elements).\n\n nbrElements = int(mshFile.readline())\n elements = []\n elementmarkers = []\n\n # temp dictionary of sets. Key:MarkerID. Value:Set. The sets will be converted to lists.\n\n bdofs = {}\n boundaryElements = {}\n\n # nodeOnPoint = {} #dictionary pointID : nodeNumber\n\n self.nodesOnCurve = {} # dictionary lineID : set of [nodeNumber]\n self.nodesOnSurface = {} # dictionary surfID : set of [nodeNumber]\n self.nodesOnVolume = {} # dictionary volID : set of [nodeNumber]\n\n # Read all elements (points, surfaces, etc):\n\n for i in range(nbrElements):\n line = list(map(int, mshFile.readline().split()))\n eType = line[1] # second int is the element type.\n nbrTags = line[2] # Third int is the nbr of tags on this element.\n marker = line[3] # Fourth int (first tag) is the marker.\n\n # Fifth int is the ID of the geometric entity (points, curves, etc) that the element belongs to\n\n entityID = line[4]\n\n # The rest after tags are node indices.\n\n nodes = line[3+nbrTags: len(line)]\n\n # If the element type is the kind of element we are looking for:\n\n if(eType == self.el_type):\n\n # Add the nodes of the elements to the list.\n\n elements.append(nodes)\n\n # Add element marker. 
It is used for keeping track of elements (thickness, heat-production and such)\n\n elementmarkers.append(marker)\n else: # If the element is not a \"real\" element we store its node at marker in bdof instead:\n _insertInSetDict(bdofs, marker, nodes)\n\n # We also store the full information as 'boundary elements'\n\n _insertBoundaryElement(boundaryElements, eType, marker, nodes)\n\n # if eType == 15: #If point. Commmented away because points only make elements if they have non-zero markers, so nodeOnPoint is not very useful.\n # nodeOnPoint[entityID-1] = nodes[0] #insert node into nodeOnPoint. (ID-1 because we want 0-based indices)\n\n if eType in [1, 8, 26, 27, 28]: # If line\n\n # insert nodes into nodesOnCurve\n\n _insertInSetDict(self.nodesOnCurve, entityID -\n 1, _offsetIndices(nodes, -1))\n elif eType in [2, 3, 9, 10, 16, 20, 21, 22, 23, 24, 25]: # If surfaceelement\n\n # insert nodes into nodesOnSurface\n\n _insertInSetDict(self.nodesOnSurface, entityID -\n 1, _offsetIndices(nodes, -1))\n else: \n \n # if volume element.\n\n _insertInSetDict(self.nodesOnVolume, entityID -\n 1, _offsetIndices(nodes, -1))\n\n elements = np.array(elements)\n for key in bdofs.keys(): # Convert the sets of boundary nodes to lists.\n bdofs[key] = list(bdofs[key])\n for key in self.nodesOnCurve.keys(): # Convert set to list\n self.nodesOnCurve[key] = list(self.nodesOnCurve[key])\n for key in self.nodesOnSurface.keys(): # Convert set to list\n self.nodesOnSurface[key] = list(self.nodesOnSurface[key])\n for key in self.nodesOnVolume.keys(): # Convert set to list\n self.nodesOnVolume[key] = list(self.nodesOnVolume[key])\n\n # Remove temporary mesh directory if not explicetly specified.\n\n if self.mesh_dir == \"\":\n shutil.rmtree(tempMeshDir)\n\n dofs = createdofs(np.size(allNodes, 0), self.dofs_per_node)\n\n if self.dofs_per_node > 1: # This if-chunk copied from pycalfem_utils.py\n self.topo = elements\n expandedElements = np.zeros(\n (np.size(elements, 0), nodesPerElement*self.dofs_per_node), 'i')\n elIdx = 0\n for elementTopo in elements:\n for i in range(nodesPerElement):\n expandedElements[elIdx, i*self.dofs_per_node:(\n i*self.dofs_per_node+self.dofs_per_node)] = dofs[elementTopo[i]-1, :]\n elIdx += 1\n\n for keyID in bdofs.keys():\n bVerts = bdofs[keyID]\n bVertsNew = []\n for i in range(len(bVerts)):\n for j in range(self.dofs_per_node):\n bVertsNew.append(dofs[bVerts[i]-1][j])\n bdofs[keyID] = bVertsNew\n\n if self.return_boundary_elements:\n return allNodes, np.asarray(expandedElements), dofs, bdofs, elementmarkers, boundaryElements\n return allNodes, np.asarray(expandedElements), dofs, bdofs, elementmarkers\n\n if self.return_boundary_elements:\n return allNodes, elements, dofs, bdofs, elementmarkers, boundaryElements\n return allNodes, elements, dofs, bdofs, elementmarkers\n\n def _writeGeoFile(self):\n\n # key is marker, value is a list of point indices (0-based) with that marker\n\n pointMarkers = {}\n curveMarkers = {}\n surfaceMarkers = {}\n volumeMarkers = {}\n\n # WRITE POINTS:\n\n for ID, [coords, elSize, marker] in self.geometry.points.items():\n self.geofile.write(\"Point(%i) = {%s};\\n\" % (\n ID+1, _formatList(coords + [elSize])))\n _insertInSetDict(pointMarkers, marker, ID)\n\n # WRITE CURVES:\n\n for ID, [curveName, points, marker, elOnCurve, distributionString, distributionVal] in self.geometry.curves.items():\n self.geofile.write(\"%s(%i) = {%s};\\n\" % (\n curveName, ID+1, _formatList(points, 1)))\n\n # Transfinite Line{2} = 20 Using Bump 0.05;\n\n if elOnCurve != None:\n 
distribution = \"\" if distributionString == None else \"Using %s %f\" % (\n distributionString, distributionVal)\n self.geofile.write(\"Transfinite Line{%i} = %i %s;\\n\" % (\n ID+1, elOnCurve+1, distribution))\n\n # +1 on elOnCurve because gmsh actually takes the number of nodes on the curve, not elements on the curve.\n\n _insertInSetDict(curveMarkers, marker, ID)\n\n # WRITE SURFACES:\n\n for ID, [surfName, outerLoop, holes, ID, marker, isStructured] in self.geometry.surfaces.items():\n\n # First we write line loops for the surface edge and holes (if there are any holes):\n\n self._writeLineLoop(outerLoop, ID+1)\n holeIDs = []\n for hole, i in zip(holes, range(len(holes))):\n\n # Create a hopefully unique ID-number for the line loop: Like 10015 or 1540035\n # (If gmsh uses 32-bit integers for IDs then IDs over 214'748 will break)\n\n holeID = 10000 * (ID+1) + 10 * i + 5\n self._writeLineLoop(hole, holeID)\n holeIDs.append(holeID)\n\n # Second, we write the surface itself:\n # If we have hole we want to include them in the surface.\n\n holeString = \"\" if not holeIDs else \", \" + _formatList(holeIDs)\n\n # Like \"Plane Surface(2) = {4, 2, 6, 8}\n\n self.geofile.write(\"%s(%i) = {%s%s};\\n\" % (\n surfName, ID+1, ID+1, holeString))\n\n # Lastly, we make the surface transfinite if it is a structured surface:\n\n if isStructured:\n cornerPoints = set()\n\n # Find the corner points. This is possibly unnecessary since Gmsh can do this automatically.\n\n for c in outerLoop:\n curvePoints = self.geometry.curves[c][1]\n cornerPoints.add(curvePoints[0])\n cornerPoints.add(curvePoints[-1])\n cornerPoints = list(cornerPoints)\n self.geofile.write(\"Transfinite Surface{%i} = {%s};\\n\" % (\n ID+1, _formatList(cornerPoints, 1))) # Like Transfinite Surface{1} = {1,2,3,4};\n\n # Transfinite Surface has an optional argument (about triangle orientation) that is not implemented here.\n\n _insertInSetDict(surfaceMarkers, marker, ID)\n\n # WRITE VOLUMES:\n\n for ID, [outerLoop, holes, ID, marker, isStructured] in self.geometry.volumes.items():\n\n # Surface loops for the volume boundary and holes (if any):\n\n self._writeSurfaceLoop(outerLoop, ID+1)\n holeIDs = []\n for hole, i in zip(holes, range(len(holes))):\n\n # ID-number for the hole surface loop\n\n holeID = 10000 * (ID+1) + 10 * i + 7\n self._writeSurfaceLoop(hole, holeID)\n holeIDs.append(holeID)\n\n # Write the volume itself:\n # If we have hole we want to include them in the surface.\n\n holeString = \"\" if not holeIDs else \" , \" + _formatList(holeIDs)\n\n # Like \"Plane Surface(2) = {4, 2, 6, 8}\n\n self.geofile.write(\n \"Volume(%i) = {%s%s};\\n\" % (ID+1, ID+1, holeString))\n\n # Lastly, we make the volume transfinite if it is a structured volume:\n\n if isStructured:\n self.geofile.write(\"Transfinite Volume{%i} = {};\\n\" % (ID+1))\n\n # We don't find the corner points of the structured volume like we did with the surfaces. 
Gmsh can actually find the corners automatically.\n\n _insertInSetDict(volumeMarkers, marker, ID)\n\n # MAYBE MAKE QUADS:\n\n if(self.el_type in self._ElementsWithQuadFaces): # If we have quads surfaces on the elements\n self.geofile.write(\"Mesh.RecombineAll = 1;\\n\")\n\n # WRITE POINT MARKERS:\n\n for marker, IDlist in pointMarkers.items():\n if marker != 0:\n self.geofile.write(\"Physical Point(%i) = {%s};\\n\" % (\n marker, _formatList(IDlist, 1)))\n\n # WRITE CURVE MARKERS:\n\n for marker, IDlist in curveMarkers.items():\n self.geofile.write(\"Physical Line(%i) = {%s};\\n\" % (\n marker, _formatList(IDlist, 1)))\n\n # WRITE SURFACE MARKERS:\n\n for marker, IDlist in surfaceMarkers.items():\n self.geofile.write(\"Physical Surface(%i) = {%s};\\n\" % (\n marker, _formatList(IDlist, 1)))\n\n # WRITE SURFACE MARKERS:\n for marker, IDlist in volumeMarkers.items():\n self.geofile.write(\"Physical Volume(%i) = {%s};\\n\" % (\n marker, _formatList(IDlist, 1)))\n\n # If the element type is of an incomplete second order type\n # (i.e it is an 2nd order element without nodes in the middle of the element face),\n # then we need to specify this in the geo-file:\n\n if self.el_type in self._2dOrderIncompleteElms:\n self.geofile.write(\"Mesh.SecondOrderIncomplete=1;\\n\")\n\n def _writeLineLoop(self, lineIndices, loopID):\n\n # endPoints is used to keep track of at which points the curves start and end (i.e the direction of the curves)\n\n endPoints = []\n\n # lineIndices is a list of curve indices (0-based here, but 1-based later in the method)\n\n for i in lineIndices:\n curvePoints = self.geometry.curves[i][1]\n endPoints.append([curvePoints[0], curvePoints[-1]])\n\n # We need the indices to be 1-based rather than 0-based in the next loop. (Some indices will be preceded by a minus-sign)\n\n lineIndices = _offsetIndices(lineIndices, 1)\n isFirstLine = True\n nbrLinesinLoop = len(lineIndices)\n\n # In this loop we reverse the direction of some lines in the LineLoop to make them conform to the format that Gmsh expects.\n\n for k in range(nbrLinesinLoop):\n if isFirstLine and nbrLinesinLoop > 1:\n isFirstLine = False\n\n # If last point of the first line exists in the endpoints of the second line... 
Do nothing\n\n if endPoints[0][1] in endPoints[1]:\n pass\n\n # Else if the first point in the first line exists in the endpoints of the second line:\n\n elif endPoints[0][0] in endPoints[1]:\n endPoints[0].reverse()\n lineIndices[0] *= -1 # Reverse the direction of the line\n else:\n raise Exception(\n \"ERROR: The first curve of line-loop %i does not link up to the subsequent curve\" % loopID)\n elif endPoints[k][0] == endPoints[k-1][1]:\n pass\n elif endPoints[k][1] == endPoints[k-1][1]:\n endPoints[k].reverse()\n lineIndices[k] *= -1 # Reverse the direction of the line\n else:\n raise Exception(\n \"ERROR: The %i th curve (starting from 0) of a line-loop %i does not link up with the preceding curve\" % (k, loopID))\n if k == nbrLinesinLoop-1 and endPoints[k][1] != endPoints[0][0]:\n # If last line AND the last point of the last curve not equal the first point of the first curve:\n raise Exception(\n \"ERROR: The last curve of a line-loop %i does not join up with the first curve\" % loopID)\n\n # If the model is in 2D we need to make all line loops counter-clockwise so surface normals point in the positive z-direction.\n\n if not self.geometry.is3D:\n lineIndices = self._makeCounterClockwise(lineIndices)\n\n self.geofile.write(\"Line Loop(%i) = {%s};\\n\" % (loopID, _formatList(\n lineIndices))) # (lineIndices are alreay 1-based here)\n\n def _makeCounterClockwise(self, lineIndices):\n '''If the lineIndices describe a line loop that is not counterclockwise,\n this function will return a counterclockwise version of lineIndices\n (i.e. all indices multiplied by -1).\n lineIndices is a list of integers (1-based line indices) that may be negative, but not 0'''\n\n # Method described at http://stackoverflow.com/questions/1165647/how-to-determine-if-a-list-of-polygon-points-are-in-clockwise-order\n\n summa = 0.0 # Counter-clockwise if the sum ends up negative.\n for index in lineIndices:\n sign = -1 if index < 0 else 1\n # Make a copy of the line index that is positive and 0-based.\n realIndex = sign*index - 1\n curveType = self.geometry.curves[realIndex][0]\n pointIDs = self.geometry.curves[realIndex][1]\n if curveType in ['Spline', 'BSpline']:\n points = [self.geometry.points[ID][0]\n for ID in pointIDs] # [[x,y,z], [x,y,z], ...]\n # Reverse the order of the points if the curve direction is reversed.\n points = points if sign == 1 else points[::-1]\n # For every point along the curve except the last:\n for i in range(len(pointIDs)-1):\n # (x2-x1)(y2+y1).\n summa += (points[i+1][0] - points[i][0]) * \\\n (points[i+1][1] + points[i][1])\n elif curveType == 'Circle':\n # We will find a point 'd' on the middle of the circle arc, and use a-d-c as approximation of the arc.\n # 3-by-3 array. 
The rows are start-center-end points and columns are x,y,z.\n points = np.array([self.geometry.points[ID][0]\n for ID in pointIDs])\n # Reverse the order of the points if the curve direction is reversed.\n points = points if sign == 1 else points[::-1]\n a = points[0, :] # start\n b = points[1, :] # center\n c = points[2, :] # end\n r = np.linalg.norm(a-b) # radius\n d = b + r * (a + 2*b + c) / np.linalg.norm(a + 2*b + c)\n approxArc = np.vstack((a, d, c))\n for i in range(len(approxArc)-1):\n # (x2-x1)(y2+y1).\n summa += (approxArc[i+1, 0] - approxArc[i, 0]) * \\\n (approxArc[i+1, 1] + approxArc[i, 1])\n elif curveType == 'Ellipse':\n # We will find a point 'd' near the middle of the circle arc, and use a-d-c as approximation of the arc.\n # The only difference from the circle above, is that the radius at d is approximated as the mean distance between\n # the center and the two other points.\n # 4-by-3 array. The rows are start-center-majAxis-end points and columns are x,y,z.\n points = np.array([self.geometry.points[ID][0]\n for ID in pointIDs])\n # skip the major axis point (row 2)\n points = points[[0, 1, 3], :]\n # Reverse the order of the points if the curve direction is reversed.\n points = points if sign == 1 else points[::-1]\n a = points[0, :] # start\n b = points[1, :] # center\n c = points[2, :] # end\n r = (np.linalg.norm(a-b) + np.linalg.norm(c-b)) / \\\n 2 # approximate radius\n d = b + r * (a + 2*b + c) / np.linalg.norm(a + 2*b + c)\n approxArc = np.vstack((a, d, c))\n for i in range(len(approxArc)-1):\n # (x2-x1)(y2+y1).\n summa += (approxArc[i+1, 0] - approxArc[i, 0]) * \\\n (approxArc[i+1, 1] + approxArc[i, 1])\n # If the sum is positive the loop (closed polygon) is clockwise, so reverse the direction of all curves:\n if summa > 0:\n lineIndices = [-x for x in lineIndices]\n return lineIndices\n\n def _writeSurfaceLoop(self, outerLoop, ID):\n self.geofile.write(\"Surface Loop(%i) = {%s};\\n\" % (\n ID, _formatList(outerLoop, 1)))\n\n # --- Compatibility properties\n\n @property\n def elType(self):\n return self.el_type\n\n @elType.setter\n def elType(self, value):\n self.el_type = value\n\n @property\n def elSizeFactor(self):\n return self.el_size_factor\n\n @elSizeFactor.setter\n def elSizeFactor(self, value):\n self.el_size_factor = value\n\n @property\n def dofsPerNode(self):\n return self.dofs_per_node\n\n @dofsPerNode.setter\n def dofsPerNode(self, value):\n self.dofs_per_node = value\n\n @property\n def gmshExecPath(self):\n return self.gmsh_exec_path\n\n @gmshExecPath.setter\n def gmshExecPath(self, value):\n self.gmsh_exec_path = value\n\n @property\n def minSize(self):\n return self.min_size\n\n @minSize.setter\n def minSize(self, value):\n self.min_size = value\n\n @property\n def maxSize(self):\n return self.max_size\n\n @maxSize.setter\n def maxSize(self, value):\n self.max_size = value\n\n @property\n def meshingAlgorithm(self):\n return self.meshing_algorithm\n\n @meshingAlgorithm.setter\n def meshingAlgorithm(self, value):\n self.meshing_algorithm = value\n\n @property\n def additionalOptions(self):\n return self.additional_options\n\n @additionalOptions.setter\n def additionalOptions(self, value):\n self.additional_options = value\n\n @property\n def meshDir(self):\n return self.mesh_dir\n\n @meshDir.setter\n def meshDir(self, value):\n self.mesh_dir = value\n\n @property\n def returnBoundaryElements(self):\n return self.return_boundary_elements\n\n @returnBoundaryElements.setter\n def returnBoundaryElements(self, value):\n self.return_boundary_elements = 
value\n\n\nGmshMesh = GmshMeshGenerator\n\n\ndef trimesh2d(vertices, segments=None, holes=None, maxArea=None, quality=True, dofs_per_node=1, logFilename=\"tri.log\", triangleExecutablePath=None):\n \"\"\"\n Triangulates an area described by a number vertices (vertices) and a set\n of segments that describes a closed polygon. \n\n Parameters:\n\n vertices array [nVertices x 2] with vertices describing the geometry.\n\n [[v0_x, v0_y],\n [ ... ],\n [vn_x, vn_y]]\n\n segments array [nSegments x 3] with segments describing the geometry.\n\n [[s0_v0, s0_v1,marker],\n [ ... ],\n [sn_v0, sn_v1,marker]]\n\n holes [Not currently used]\n\n maxArea Maximum area for triangle. (None)\n\n quality If true, triangles are prevented having angles < 30 degrees. (True)\n\n dofs_per_node Number of degrees of freedom per node.\n\n logFilename Filename for triangle output (\"tri.log\")\n\n Returns:\n\n coords Node coordinates\n\n [[n0_x, n0_y],\n [ ... ],\n [nn_x, nn_y]]\n\n edof Element topology\n\n [[el0_dof1, ..., el0_dofn],\n [ ... ],\n [eln_dof1, ..., eln_dofn]]\n\n dofs Node dofs\n\n [[n0_dof1, ..., n0_dofn],\n [ ... ],\n [nn_dof1, ..., nn_dofn]]\n\n bdofs Boundary dofs. Dictionary containing lists of dofs for\n each boundary marker. Dictionary key = marker id.\n\n \"\"\"\n\n # Check for triangle executable\n\n triangleExecutable = triangleExecutablePath\n\n if triangleExecutable == None:\n triangleExecutable = \"\"\n if sys.platform == \"win32\":\n triangleExecutable = which(\"triangle.exe\")\n else:\n triangleExecutable = which(\"triangle\")\n else:\n if not os.path.exists(triangleExecutable):\n triangleExecutable = None\n\n if triangleExecutable == None:\n error(\"Error: Could not find triangle. Please make sure that the \\ntriangle executable is available on the search path (PATH).\")\n return None, None, None, None\n\n # Create triangle options\n\n options = \"\"\n\n if maxArea != None:\n options += \"-a%f \" % maxArea + \" \"\n if quality:\n options += \"-q\"\n\n # Set initial variables\n\n nSegments = 0\n nHoles = 0\n nAttribs = 0\n nBoundaryMarkers = 1\n nVertices = len(vertices)\n\n # All files are created as temporary files\n\n if not os.path.exists(\"./trimesh.temp\"):\n os.mkdir(\"./trimesh.temp\")\n\n filename = \"./trimesh.temp/polyfile.poly\"\n\n if not segments is None:\n nSegments = len(segments)\n\n if not holes is None:\n nHoles = len(holes)\n\n # Create a .poly file\n\n polyFile = open(filename, \"w\")\n polyFile.write(\"%d 2 %d \\n\" % (nVertices, nAttribs))\n\n i = 0\n\n for vertex in vertices:\n polyFile.write(\"%d %g %g\\n\" % (i, vertex[0], vertex[1]))\n i = i + 1\n\n polyFile.write(\"%d %d \\n\" % (nSegments, nBoundaryMarkers))\n\n i = 0\n\n for segment in segments:\n polyFile.write(\"%d %d %d %d\\n\" %\n (i, segment[0], segment[1], segment[2]))\n i = i + 1\n\n polyFile.write(\"0\\n\")\n\n polyFile.close()\n\n # Execute triangle\n\n os.system(\"%s %s %s > tri.log\" % (triangleExecutable, options, filename))\n\n # Read results from triangle\n\n strippedName = os.path.splitext(filename)[0]\n\n nodeFilename = \"%s.1.node\" % strippedName\n elementFilename = \"%s.1.ele\" % strippedName\n polyFilename = \"%s.1.poly\" % strippedName\n\n # Read vertices\n\n allVertices = None\n boundaryVertices = {}\n\n if os.path.exists(nodeFilename):\n nodeFile = open(nodeFilename, \"r\")\n nodeInfo = list(map(int, nodeFile.readline().split()))\n\n nNodes = nodeInfo[0]\n\n allVertices = np.zeros([nNodes, 2], 'd')\n\n for i in range(nNodes):\n vertexRow = list(map(float, nodeFile.readline().split()))\n\n 
boundaryMarker = int(vertexRow[3])\n\n if not (boundaryMarker in boundaryVertices):\n boundaryVertices[boundaryMarker] = []\n\n allVertices[i, :] = [vertexRow[1], vertexRow[2]]\n boundaryVertices[boundaryMarker].append(i+1)\n\n nodeFile.close()\n\n # Read elements\n\n elements = []\n\n if os.path.exists(elementFilename):\n elementFile = open(elementFilename, \"r\")\n elementInfo = list(map(int, elementFile.readline().split()))\n\n nElements = elementInfo[0]\n\n elements = np.zeros([nElements, 3], 'i')\n\n for i in range(nElements):\n elementRow = list(map(int, elementFile.readline().split()))\n elements[i, :] = [elementRow[1]+1,\n elementRow[2]+1, elementRow[3]+1]\n\n elementFile.close()\n\n # Clean up\n\n try:\n pass\n # os.remove(filename)\n # os.remove(nodeFilename)\n # os.remove(elementFilename)\n # os.remove(polyFilename)\n except:\n pass\n\n # Add dofs in edof and bcVerts\n\n dofs = cfc.createdofs(np.size(allVertices, 0), dofs_per_node)\n\n if dofs_per_node > 1:\n expandedElements = np.zeros(\n (np.size(elements, 0), 3*dofs_per_node), 'i')\n dofs = cfc.createdofs(np.size(allVertices, 0), dofs_per_node)\n\n elIdx = 0\n\n for elementTopo in elements:\n for i in range(3):\n expandedElements[elIdx, i*dofs_per_node:(\n i*dofs_per_node+dofs_per_node)] = dofs[elementTopo[i]-1, :]\n elIdx += 1\n\n for bVertIdx in boundaryVertices.keys():\n bVert = boundaryVertices[bVertIdx]\n bVertNew = []\n for i in range(len(bVert)):\n for j in range(dofs_per_node):\n bVertNew.append(dofs[bVert[i]-1][j])\n\n boundaryVertices[bVertIdx] = bVertNew\n\n return allVertices, np.asarray(expandedElements), dofs, boundaryVertices\n\n return allVertices, elements, dofs, boundaryVertices\n"
] | [
[
"numpy.matrix",
"numpy.linspace",
"numpy.asarray",
"numpy.vstack",
"matplotlib.pyplot.plot",
"numpy.max",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.concatenate",
"numpy.cross",
"matplotlib.patches.PathPatch",
"matplotlib.pyplot.gca",
"numpy.allclose",
"numpy.reshape",
"matplotlib.pyplot.gcf",
"numpy.sin",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.tricontour",
"matplotlib.pyplot.close",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.text",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.linalg.inv",
"matplotlib.path.Path",
"numpy.asmatrix",
"matplotlib.pyplot.show",
"numpy.array",
"numpy.linalg.norm",
"numpy.cos",
"matplotlib.pyplot.colorbar",
"matplotlib.pyplot.clf",
"numpy.shape",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"numpy.empty"
],
[
"numpy.asarray",
"numpy.linalg.norm",
"numpy.size",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
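The CALFEM-style mesh-generator record above ends with a `_makeCounterClockwise` helper that orients 2D line loops using the signed-area (shoelace) test from the linked Stack Overflow answer: summing (x2 - x1)(y2 + y1) over the edges of a closed polygon gives a positive total for a clockwise loop. A minimal sketch of that test, assuming straight segments only (the generator additionally approximates Circle and Ellipse arcs with a midpoint before applying the same sum); the function name here is hypothetical:

def is_clockwise(points):
    # points: ordered list of (x, y) vertices of a closed polygon.
    # Shoelace-style edge sum: positive => clockwise, negative => counter-clockwise.
    summa = 0.0
    for (x1, y1), (x2, y2) in zip(points, points[1:] + points[:1]):
        summa += (x2 - x1) * (y2 + y1)
    return summa > 0

square_ccw = [(0, 0), (1, 0), (1, 1), (0, 1)]
assert not is_clockwise(square_ccw)              # counter-clockwise loop: sum is negative
assert is_clockwise(list(reversed(square_ccw)))  # reversed order: sum is positive

When the sum comes out positive, the generator negates every 1-based line index in the loop, which is how Gmsh encodes a reversed curve direction in a Line Loop.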
g-parki/bokeh | [
"664ead5306bba64609e734d4105c8aa8cfb76d81",
"664ead5306bba64609e734d4105c8aa8cfb76d81",
"664ead5306bba64609e734d4105c8aa8cfb76d81",
"664ead5306bba64609e734d4105c8aa8cfb76d81",
"664ead5306bba64609e734d4105c8aa8cfb76d81",
"664ead5306bba64609e734d4105c8aa8cfb76d81",
"664ead5306bba64609e734d4105c8aa8cfb76d81"
] | [
"examples/plotting/file/slider.py",
"examples/plotting/file/les_mis.py",
"examples/plotting/file/burtin.py",
"examples/webgl/clustering.py",
"tests/unit/bokeh/protocol/messages/test_patch_doc.py",
"tests/unit/bokeh/plotting/test__plot.py",
"examples/plotting/file/hexbin.py"
] | [
"''' An interactive plot of the ``sin`` function. This example demonstrates\nadding widgets and ``CustomJS`` callbacks that can update a plot.\n\n.. bokeh-example-metadata::\n :apis: bokeh.plotting.Figure.line, bokeh.layouts.column, bokeh.layouts.row, bokeh.models.callbacks.CustomJS, bokeh.models.widgets.sliders.Slider\n :refs: :ref:`userguide_interaction_jscallbacks` > :ref:`userguide_interaction_jscallbacks_customjs`\n :keywords: javascript callback\n\n'''\nimport numpy as np\n\nfrom bokeh.layouts import column, row\nfrom bokeh.models import CustomJS, Slider\nfrom bokeh.plotting import ColumnDataSource, figure, show\n\nx = np.linspace(0, 10, 500)\ny = np.sin(x)\n\nsource = ColumnDataSource(data=dict(x=x, y=y))\n\nplot = figure(y_range=(-10, 10), width=400, height=400)\n\nplot.line('x', 'y', source=source, line_width=3, line_alpha=0.6)\n\namp_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Amplitude\")\nfreq_slider = Slider(start=0.1, end=10, value=1, step=.1, title=\"Frequency\")\nphase_slider = Slider(start=0, end=6.4, value=0, step=.1, title=\"Phase\")\noffset_slider = Slider(start=-5, end=5, value=0, step=.1, title=\"Offset\")\n\ncallback = CustomJS(args=dict(source=source, amp=amp_slider, freq=freq_slider, phase=phase_slider, offset=offset_slider),\n code=\"\"\"\n const data = source.data;\n const A = amp.value;\n const k = freq.value;\n const phi = phase.value;\n const B = offset.value;\n const x = data['x']\n const y = data['y']\n for (let i = 0; i < x.length; i++) {\n y[i] = B + A*Math.sin(k*x[i]+phi);\n }\n source.change.emit();\n\"\"\")\n\namp_slider.js_on_change('value', callback)\nfreq_slider.js_on_change('value', callback)\nphase_slider.js_on_change('value', callback)\noffset_slider.js_on_change('value', callback)\n\nlayout = row(\n plot,\n column(amp_slider, freq_slider, phase_slider, offset_slider),\n)\n\nshow(layout)\n",
"''' A reproduction of Mike Bostock's `Les Misérables Co-occurrence`_ chart.\nThis example example demonostrates a basic hover tooltip.\n\n.. bokeh-example-metadata::\n :sampledata: les_mis\n :apis: bokeh.plotting.Figure.rect\n :refs: :ref:`userguide_tools` > :ref:`userguide_tools_hover_tool`\n :keywords: hover, rect, tooltip\n\n.. _Les Misérables Co-occurrence: https://bost.ocks.org/mike/miserables/\n'''\nimport numpy as np\n\nfrom bokeh.plotting import figure, show\nfrom bokeh.sampledata.les_mis import data\n\nnodes = data['nodes']\nnames = [node['name'] for node in sorted(data['nodes'], key=lambda x: x['group'])]\n\nN = len(nodes)\ncounts = np.zeros((N, N))\nfor link in data['links']:\n counts[link['source'], link['target']] = link['value']\n counts[link['target'], link['source']] = link['value']\n\ncolormap = [\"#444444\", \"#a6cee3\", \"#1f78b4\", \"#b2df8a\", \"#33a02c\", \"#fb9a99\",\n \"#e31a1c\", \"#fdbf6f\", \"#ff7f00\", \"#cab2d6\", \"#6a3d9a\"]\n\nxname = []\nyname = []\ncolor = []\nalpha = []\nfor i, node1 in enumerate(nodes):\n for j, node2 in enumerate(nodes):\n xname.append(node1['name'])\n yname.append(node2['name'])\n\n alpha.append(min(counts[i,j]/4.0, 0.9) + 0.1)\n\n if node1['group'] == node2['group']:\n color.append(colormap[node1['group']])\n else:\n color.append('lightgrey')\n\ndata=dict(\n xname=xname,\n yname=yname,\n colors=color,\n alphas=alpha,\n count=counts.flatten(),\n)\n\np = figure(title=\"Les Mis Occurrences\",\n x_axis_location=\"above\", tools=\"hover,save\",\n x_range=list(reversed(names)), y_range=names,\n tooltips = [('names', '@yname, @xname'), ('count', '@count')])\n\np.width = 800\np.height = 800\np.grid.grid_line_color = None\np.axis.axis_line_color = None\np.axis.major_tick_line_color = None\np.axis.major_label_text_font_size = \"7px\"\np.axis.major_label_standoff = 0\np.xaxis.major_label_orientation = np.pi/3\n\np.rect('xname', 'yname', 0.9, 0.9, source=data,\n color='colors', alpha='alphas', line_color=None,\n hover_line_color='black', hover_color='colors')\n\nshow(p)\n",
"''' A reproduction of `Will Burtin's historical visualization`_ of antibiotic\nefficacies.\n\n.. note::\n This chart is reproduced as a demonstration of Bokeh's versatile graphics\n capabilities, but there are better, simpler ways to present this data.\n\n.. bokeh-example-metadata::\n :sampledata: antibiotics\n :apis: bokeh.plotting.Figure.annular_wedge, bokeh.plotting.Figure.circle, bokeh.plotting.Figure.text\n :refs: :ref:`userguide_plotting` > :ref:`userguide_plotting_wedges_arcs`, :ref:`userguide_styling` > :ref:`userguide_styling_visual_properties`\n :keywords: text, wedges\n\n.. _Will Burtin's historical visualization: https://mbostock.github.io/protovis/ex/antibiotics-burtin.html\n\n'''\nfrom math import log, sqrt\n\nimport numpy as np\n\nfrom bokeh.plotting import figure, show\nfrom bokeh.sampledata.antibiotics import data as df\n\ndrug_color = dict([\n (\"Penicillin\", \"#0d3362\"),\n (\"Streptomycin\", \"#c64737\"),\n (\"Neomycin\", \"black\" ),\n])\n\ngram_color = dict([\n (\"negative\", \"#e69584\"),\n (\"positive\", \"#aeaeb8\"),\n])\n\nwidth = 800\nheight = 800\ninner_radius = 90\nouter_radius = 300 - 10\n\nminr = sqrt(log(.001 * 1E4))\nmaxr = sqrt(log(1000 * 1E4))\na = (outer_radius - inner_radius) / (minr - maxr)\nb = inner_radius - a * maxr\n\ndef rad(mic):\n return a * np.sqrt(np.log(mic * 1E4)) + b\n\nbig_angle = 2.0 * np.pi / (len(df) + 1)\nsmall_angle = big_angle / 7\n\np = figure(width=width, height=height, title=\"\",\n x_axis_type=None, y_axis_type=None,\n x_range=(-420, 420), y_range=(-420, 420),\n min_border=0, outline_line_color=\"black\",\n background_fill_color=\"#f0e1d2\")\n\np.xgrid.grid_line_color = None\np.ygrid.grid_line_color = None\n\n# annular wedges\nangles = np.pi/2 - big_angle/2 - df.index.to_series()*big_angle\ncolors = [gram_color[gram] for gram in df.gram]\np.annular_wedge(\n 0, 0, inner_radius, outer_radius, -big_angle+angles, angles, color=colors,\n)\n\n# small wedges\np.annular_wedge(0, 0, inner_radius, rad(df.penicillin),\n -big_angle+angles+5*small_angle, -big_angle+angles+6*small_angle,\n color=drug_color['Penicillin'])\np.annular_wedge(0, 0, inner_radius, rad(df.streptomycin),\n -big_angle+angles+3*small_angle, -big_angle+angles+4*small_angle,\n color=drug_color['Streptomycin'])\np.annular_wedge(0, 0, inner_radius, rad(df.neomycin),\n -big_angle+angles+1*small_angle, -big_angle+angles+2*small_angle,\n color=drug_color['Neomycin'])\n\n# circular axes and lables\nlabels = np.power(10.0, np.arange(-3, 4))\nradii = a * np.sqrt(np.log(labels * 1E4)) + b\np.circle(0, 0, radius=radii, fill_color=None, line_color=\"white\")\np.text(0, radii[:-1], [str(r) for r in labels[:-1]],\n text_font_size=\"11px\", text_align=\"center\", text_baseline=\"middle\")\n\n# radial axes\np.annular_wedge(0, 0, inner_radius-10, outer_radius+10,\n -big_angle+angles, -big_angle+angles, color=\"black\")\n\n# bacteria labels\nxr = radii[0]*np.cos(np.array(-big_angle/2 + angles))\nyr = radii[0]*np.sin(np.array(-big_angle/2 + angles))\nlabel_angle=np.array(-big_angle/2+angles)\nlabel_angle[label_angle < -np.pi/2] += np.pi # easier to read labels on the left side\np.text(xr, yr, df.bacteria, angle=label_angle,\n text_font_size=\"12px\", text_align=\"center\", text_baseline=\"middle\")\n\n# OK, these hand drawn legends are pretty clunky, will be improved in future release\np.circle([-40, -40], [-370, -390], color=list(gram_color.values()), radius=5)\np.text([-30, -30], [-370, -390], text=[\"Gram-\" + gr for gr in gram_color.keys()],\n text_font_size=\"9px\", 
text_align=\"left\", text_baseline=\"middle\")\n\np.rect([-40, -40, -40], [18, 0, -18], width=30, height=13,\n color=list(drug_color.values()))\np.text([-15, -15, -15], [18, 0, -18], text=list(drug_color),\n text_font_size=\"12px\", text_align=\"left\", text_baseline=\"middle\")\n\nshow(p)\n",
"''' Example inspired by an example from the scikit-learn project:\n\nhttp://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n\n'''\nimport numpy as np\nfrom sklearn import cluster, datasets\nfrom sklearn.preprocessing import StandardScaler\n\nfrom bokeh.layouts import column, row\nfrom bokeh.plotting import figure, output_file, show\n\nprint(\"\\n\\n*** This example may take several seconds to run before displaying. ***\\n\\n\")\n\nN = 50000\nPLOT_SIZE = 400\n\n# generate datasets.\nnp.random.seed(0)\nnoisy_circles = datasets.make_circles(n_samples=N, factor=.5, noise=.04)\nnoisy_moons = datasets.make_moons(n_samples=N, noise=.05)\ncenters = [(-2, 3), (2, 3), (-2, -3), (2, -3)]\nblobs1 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.4, random_state=8)\nblobs2 = datasets.make_blobs(centers=centers, n_samples=N, cluster_std=0.7, random_state=8)\n\ncolors = np.array([x for x in ('#00f', '#0f0', '#f00', '#0ff', '#f0f', '#ff0')])\ncolors = np.hstack([colors] * 20)\n\n# create clustering algorithms\ndbscan = cluster.DBSCAN(eps=.2)\nbirch = cluster.Birch(n_clusters=2)\nmeans = cluster.MiniBatchKMeans(n_clusters=2)\nspectral = cluster.SpectralClustering(n_clusters=2, eigen_solver='arpack', affinity=\"nearest_neighbors\")\naffinity = cluster.AffinityPropagation(damping=.9, preference=-200)\n\n# change here, to select clustering algorithm (note: spectral is slow)\nalgorithm = dbscan # <- SELECT ALG\n\nplots =[]\nfor dataset in (noisy_circles, noisy_moons, blobs1, blobs2):\n X, y = dataset\n X = StandardScaler().fit_transform(X)\n\n # predict cluster memberships\n algorithm.fit(X)\n if hasattr(algorithm, 'labels_'):\n y_pred = algorithm.labels_.astype(int)\n else:\n y_pred = algorithm.predict(X)\n\n p = figure(output_backend=\"webgl\", title=algorithm.__class__.__name__,\n width=PLOT_SIZE, height=PLOT_SIZE)\n\n p.circle(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), alpha=0.1,)\n\n plots.append(p)\n\n# generate layout for the plots\nlayout = column(row(plots[:2]), row(plots[2:]))\n\noutput_file(\"clustering.html\", title=\"clustering with sklearn\")\n\nshow(layout)\n",
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import annotations # isort:skip\n\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nfrom json import loads\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nimport bokeh.document as document\nfrom bokeh.core.properties import Instance, Int, Nullable\nfrom bokeh.document.events import (\n ColumnDataChangedEvent,\n ColumnsPatchedEvent,\n ColumnsStreamedEvent,\n ModelChangedEvent,\n RootAddedEvent,\n RootRemovedEvent,\n)\nfrom bokeh.model import Model\nfrom bokeh.models import ColumnDataSource\nfrom bokeh.protocol import Protocol\n\n# Module under test\nfrom bokeh.protocol.messages.patch_doc import process_document_events # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\nproto = Protocol()\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\nclass AnotherModelInTestPatchDoc(Model):\n bar = Int(1)\n\nclass SomeModelInTestPatchDoc(Model):\n foo = Int(2)\n child = Nullable(Instance(Model))\n\n\nclass TestPatchDocument:\n def _sample_doc(self):\n doc = document.Document()\n another = AnotherModelInTestPatchDoc()\n doc.add_root(SomeModelInTestPatchDoc(child=another))\n doc.add_root(SomeModelInTestPatchDoc())\n return doc\n\n def test_create_no_events(self) -> None:\n with pytest.raises(ValueError):\n proto.create(\"PATCH-DOC\", [])\n\n def test_create_multiple_docs(self) -> None:\n sample1 = self._sample_doc()\n obj1 = next(iter(sample1.roots))\n event1 = ModelChangedEvent(sample1, obj1, 'foo', obj1.foo, 42, 42)\n\n sample2 = self._sample_doc()\n obj2 = next(iter(sample2.roots))\n event2 = ModelChangedEvent(sample2, obj2, 'foo', obj2.foo, 42, 42)\n with pytest.raises(ValueError):\n proto.create(\"PATCH-DOC\", [event1, event2])\n\n def test_create_model_changed(self) -> None:\n sample = self._sample_doc()\n obj = next(iter(sample.roots))\n event = ModelChangedEvent(sample, obj, 'foo', obj.foo, 42, 42)\n proto.create(\"PATCH-DOC\", [event])\n\n def test_create_then_apply_model_changed(self) -> None:\n sample = self._sample_doc()\n\n foos = []\n for r in sample.roots:\n foos.append(r.foo)\n assert foos == [ 2, 2 ]\n\n obj = next(iter(sample.roots))\n assert obj.foo == 2\n event = ModelChangedEvent(sample, obj, 'foo', obj.foo, 42, 42)\n msg = proto.create(\"PATCH-DOC\", [event])\n\n copy = document.Document.from_json_string(sample.to_json_string())\n msg.apply_to_document(copy)\n\n foos = []\n for r in copy.roots:\n foos.append(r.foo)\n foos.sort()\n assert foos == [ 2, 42 ]\n\n def test_patch_event_contains_setter(self) -> None:\n sample = self._sample_doc()\n root = None\n other_root = None\n for r in sample.roots:\n if r.child is 
not None:\n root = r\n else:\n other_root = r\n assert root is not None\n assert other_root is not None\n new_child = AnotherModelInTestPatchDoc(bar=56)\n\n cds = ColumnDataSource(data={'a': np.array([0., 1., 2.])})\n sample.add_root(cds)\n\n mock_session = object()\n def sample_document_callback_assert(event):\n \"\"\"Asserts that setter is correctly set on event\"\"\"\n assert event.setter is mock_session\n sample.on_change(sample_document_callback_assert)\n\n # Model property changed\n event = ModelChangedEvent(sample, root, 'child', root.child, new_child, new_child)\n msg = proto.create(\"PATCH-DOC\", [event])\n msg.apply_to_document(sample, mock_session)\n assert msg.buffers == []\n\n # RootAdded\n event2 = RootAddedEvent(sample, root)\n msg2 = proto.create(\"PATCH-DOC\", [event2])\n msg2.apply_to_document(sample, mock_session)\n assert msg2.buffers == []\n\n # RootRemoved\n event3 = RootRemovedEvent(sample, root)\n msg3 = proto.create(\"PATCH-DOC\", [event3])\n msg3.apply_to_document(sample, mock_session)\n assert msg3.buffers == []\n\n # ColumnsStreamed\n event4 = ModelChangedEvent(sample, cds, 'data', 10, None, None,\n hint=ColumnsStreamedEvent(sample, cds, {\"a\": [3]}, None, mock_session))\n msg4 = proto.create(\"PATCH-DOC\", [event4])\n msg4.apply_to_document(sample, mock_session)\n assert msg4.buffers == []\n\n # ColumnsPatched\n event5 = ModelChangedEvent(sample, cds, 'data', 10, None, None,\n hint=ColumnsPatchedEvent(sample, cds, {\"a\": [(0, 11)]}))\n msg5 = proto.create(\"PATCH-DOC\", [event5])\n msg5.apply_to_document(sample, mock_session)\n assert msg5.buffers == []\n\n # ColumnDataChanged, use_buffers=False\n event6 = ModelChangedEvent(sample, cds, 'data', {'a': np.array([0., 1.])}, None, None,\n hint=ColumnDataChangedEvent(sample, cds))\n msg6 = proto.create(\"PATCH-DOC\", [event6], use_buffers=False)\n msg6.apply_to_document(sample, mock_session)\n assert msg6.buffers == []\n\n print(cds.data)\n # ColumnDataChanged, use_buffers=True\n event7 = ModelChangedEvent(sample, cds, 'data', {'a': np.array([0., 1.])}, None, None,\n hint=ColumnDataChangedEvent(sample, cds))\n msg7 = proto.create(\"PATCH-DOC\", [event7])\n # can't test apply, doc not set up to *receive* binary buffers\n # msg7.apply_to_document(sample, mock_session)\n assert len(msg7.buffers) == 1\n buf = msg7.buffers.pop()\n assert len(buf) == 2\n assert isinstance(buf[0], dict)\n assert list(buf[0]) == ['id']\n\n # reports CDS buffer *as it is* Normally events called by setter and\n # value in local object would have been already mutated.\n assert buf[1] == np.array([11., 1., 2., 3]).tobytes()\n\nclass _Event:\n def __init__(self, refs, bufs) -> None:\n self.refs=refs\n self.bufs=bufs\n def generate(self, refs, bufs):\n refs.update(self.refs)\n if bufs is not None:\n bufs.extend(self.bufs)\n return \"junk\"\n\nclass _M(Model):\n pass\n\ndef test_process_document_events_no_refs() -> None:\n e = _Event([], [])\n r, bufs = process_document_events([e])\n assert bufs == []\n json = loads(r)\n assert sorted(list(json)) == ['events', 'references']\n assert len(json['references']) == 0\n assert len(json['events']) == 1\n assert json['events'] == ['junk']\n\ndef test_process_document_events_with_refs() -> None:\n e = _Event([_M(),_M()], [])\n r, bufs = process_document_events([e])\n assert bufs == []\n json = loads(r)\n assert sorted(list(json)) == ['events', 'references']\n assert len(json['references']) == 2\n assert len(json['events']) == 1\n assert json['events'] == ['junk']\n\ndef 
test_process_document_events_no_buffers() -> None:\n e = _Event([], [])\n r, bufs = process_document_events([e])\n assert bufs == []\n json = loads(r)\n assert sorted(list(json)) == ['events', 'references']\n assert len(json['references']) == 0\n assert len(json['events']) == 1\n assert json['events'] == ['junk']\n\ndef test_process_document_events_with_buffers() -> None:\n e = _Event([], [1,2])\n r, bufs = process_document_events([e])\n assert bufs == [1, 2]\n json = loads(r)\n assert sorted(list(json)) == ['events', 'references']\n assert len(json['references']) == 0\n assert len(json['events']) == 1\n assert json['events'] == ['junk']\n\ndef test_process_document_events_mixed() -> None:\n e1 = _Event([], [1,2])\n e2 = _Event([_M(),_M(),_M()], [3,4, 5])\n e3 = _Event([_M(),_M()], [])\n r, bufs = process_document_events([e1, e2, e3])\n assert bufs == [1, 2, 3, 4, 5]\n json = loads(r)\n assert sorted(list(json)) == ['events', 'references']\n assert len(json['references']) == 5\n assert len(json['events']) == 3\n assert json['events'] == ['junk', 'junk', 'junk']\n\ndef test_process_document_events_with_buffers_and_use_buffers_false() -> None:\n e = _Event([], [1,2])\n r, bufs = process_document_events([e], use_buffers=False)\n assert bufs == []\n json = loads(r)\n assert sorted(list(json)) == ['events', 'references']\n assert len(json['references']) == 0\n assert len(json['events']) == 1\n assert json['events'] == ['junk']\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n",
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2022, Anaconda, Inc., and Bokeh Contributors.\n# All rights reserved.\n#\n# The full license is in the file LICENSE.txt, distributed with this software.\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Boilerplate\n#-----------------------------------------------------------------------------\nfrom __future__ import annotations # isort:skip\n\nimport pytest ; pytest\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport datetime\n\n# External imports\nimport numpy as np\n\n# Bokeh imports\nfrom bokeh.models import (\n CategoricalAxis,\n CategoricalScale,\n DataRange1d,\n DatetimeAxis,\n FactorRange,\n LinearAxis,\n LinearScale,\n LogAxis,\n LogScale,\n MercatorAxis,\n Range1d,\n)\n\n# Module under test\nimport bokeh.plotting._plot as bpp # isort:skip\n\n#-----------------------------------------------------------------------------\n# Setup\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n\nclass test_get_scale_factor_range:\n def test_numeric_range_linear_axis() -> None:\n s = bpp.get_scale(Range1d(), \"linear\")\n assert isinstance(s, LinearScale)\n\n s = bpp.get_scale(Range1d(), \"datetime\")\n assert isinstance(s, LinearScale)\n\n s = bpp.get_scale(Range1d(), \"auto\")\n assert isinstance(s, LinearScale)\n\n def test_numeric_range_log_axis() -> None:\n s = bpp.get_scale(DataRange1d(), \"log\")\n assert isinstance(s, LogScale)\n\n def test_factor_range() -> None:\n s = bpp.get_scale(FactorRange(), \"auto\")\n assert isinstance(s, CategoricalScale)\n\n\nclass Test_get_range:\n def test_with_None(self) -> None:\n r = bpp.get_range(None)\n assert isinstance(r, DataRange1d)\n\n def test_with_Range(self) -> None:\n for t in [Range1d, DataRange1d, FactorRange]:\n rng = t()\n r = bpp.get_range(rng)\n assert r is rng\n\n def test_with_ndarray(self) -> None:\n r = bpp.get_range(np.array([10, 20]))\n assert isinstance(r, Range1d)\n assert r.start == 10\n assert r.end == 20\n\n def test_with_too_long_ndarray(self) -> None:\n with pytest.raises(ValueError):\n bpp.get_range(np.array([10, 20, 30]))\n\n def test_with_ndarray_factors(self) -> None:\n f = np.array([\"Crosby\", \"Stills\", \"Nash\", \"Young\"])\n r = bpp.get_range(f)\n assert isinstance(r, FactorRange)\n assert r.factors == list(f)\n\n def test_with_series(self, pd) -> None:\n r = bpp.get_range(pd.Series([20, 30]))\n assert isinstance(r, Range1d)\n assert r.start == 20\n assert r.end == 30\n\n def test_with_too_long_series(self, pd) -> None:\n with pytest.raises(ValueError):\n bpp.get_range(pd.Series([20, 30, 40]))\n\n def test_with_string_seq(self) -> None:\n f = [\"foo\" ,\"end\", \"baz\"]\n for t in [list, tuple]:\n r = bpp.get_range(t(f))\n assert isinstance(r, FactorRange)\n # FactorRange accepts Seq, but get_range always sets a list copy\n assert r.factors == f\n\n def 
test_with_float_bounds(self) -> None:\n r = bpp.get_range((1.2, 10))\n assert isinstance(r, Range1d)\n assert r.start == 1.2\n assert r.end == 10\n\n r = bpp.get_range([1.2, 10])\n assert isinstance(r, Range1d)\n assert r.start == 1.2\n assert r.end == 10\n\n def test_with_pandas_group(self, pd) -> None:\n from bokeh.sampledata.iris import flowers\n g = flowers.groupby('species')\n r = bpp.get_range(g)\n assert isinstance(r, FactorRange)\n assert r.factors == ['setosa', 'versicolor', 'virginica'] # should always be sorted\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n_RANGES = [Range1d(), DataRange1d(), FactorRange()]\n\n\nclass Test__get_axis_class:\n @pytest.mark.parametrize('range', _RANGES)\n def test_axis_type_None(self, range) -> None:\n assert(bpp._get_axis_class(None, range, 0)) == (None, {})\n assert(bpp._get_axis_class(None, range, 1)) == (None, {})\n\n @pytest.mark.parametrize('range', _RANGES)\n def test_axis_type_linear(self, range) -> None:\n assert(bpp._get_axis_class(\"linear\", range, 0)) == (LinearAxis, {})\n assert(bpp._get_axis_class(\"linear\", range, 1)) == (LinearAxis, {})\n\n @pytest.mark.parametrize('range', _RANGES)\n def test_axis_type_log(self, range) -> None:\n assert(bpp._get_axis_class(\"log\", range, 0)) == (LogAxis, {})\n assert(bpp._get_axis_class(\"log\", range, 1)) == (LogAxis, {})\n\n @pytest.mark.parametrize('range', _RANGES)\n def test_axis_type_datetime(self, range) -> None:\n assert(bpp._get_axis_class(\"datetime\", range, 0)) == (DatetimeAxis, {})\n assert(bpp._get_axis_class(\"datetime\", range, 1)) == (DatetimeAxis, {})\n\n @pytest.mark.parametrize('range', _RANGES)\n def test_axis_type_mercator(self, range) -> None:\n assert(bpp._get_axis_class(\"mercator\", range, 0)) == (MercatorAxis, {'dimension': 'lon'})\n assert(bpp._get_axis_class(\"mercator\", range, 1)) == (MercatorAxis, {'dimension': 'lat'})\n\n def test_axis_type_auto(self) -> None:\n assert(bpp._get_axis_class(\"auto\", FactorRange(), 0)) == (CategoricalAxis, {})\n assert(bpp._get_axis_class(\"auto\", FactorRange(), 1)) == (CategoricalAxis, {})\n assert(bpp._get_axis_class(\"auto\", DataRange1d(), 0)) == (LinearAxis, {})\n assert(bpp._get_axis_class(\"auto\", DataRange1d(), 1)) == (LinearAxis, {})\n assert(bpp._get_axis_class(\"auto\", Range1d(), 0)) == (LinearAxis, {})\n assert(bpp._get_axis_class(\"auto\", Range1d(), 1)) == (LinearAxis, {})\n assert(bpp._get_axis_class(\"auto\", Range1d(start=datetime.datetime(2018, 3, 21)), 0)) == (DatetimeAxis, {})\n assert(bpp._get_axis_class(\"auto\", Range1d(start=datetime.datetime(2018, 3, 21)), 1)) == (DatetimeAxis, {})\n\n @pytest.mark.parametrize('range', _RANGES)\n def test_axis_type_error(self, range) -> None:\n with pytest.raises(ValueError):\n bpp._get_axis_class(\"junk\", range, 0)\n with pytest.raises(ValueError):\n bpp._get_axis_class(\"junk\", range, 1)\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n",
"''' A automatic hexbin plot using randomly selected points. This chart shows\n500 points points from a normal distribution binned into hexagonal tiles. A\nhover tooltip displays information for each tile.\n\n.. bokeh-example-metadata::\n :apis: bokeh.plotting.Figure.hexbin\n :refs: :ref:`userguide_plotting` > :ref:`userguide_plotting_hex`\n :keywords: hex, hexbin, hover, tooltip\n\n'''\nimport numpy as np\n\nfrom bokeh.models import HoverTool\nfrom bokeh.plotting import figure, show\n\nn = 500\nx = 2 + 2*np.random.standard_normal(n)\ny = 2 + 2*np.random.standard_normal(n)\n\np = figure(title=\"Hexbin for 500 points\", match_aspect=True,\n tools=\"wheel_zoom,reset\", background_fill_color='#440154')\np.grid.visible = False\n\nr, bins = p.hexbin(x, y, size=0.5, hover_color=\"pink\", hover_alpha=0.8)\n\np.circle(x, y, color=\"white\", size=1)\n\np.add_tools(HoverTool(\n tooltips=[(\"count\", \"@c\"), (\"(q,r)\", \"(@q, @r)\")],\n mode=\"mouse\", point_policy=\"follow_mouse\", renderers=[r]\n))\n\nshow(p)\n"
] | [
[
"numpy.linspace",
"numpy.sin"
],
[
"numpy.zeros"
],
[
"numpy.arange",
"numpy.log",
"numpy.array"
],
[
"numpy.hstack",
"sklearn.cluster.AffinityPropagation",
"numpy.random.seed",
"sklearn.datasets.make_moons",
"sklearn.cluster.DBSCAN",
"sklearn.cluster.SpectralClustering",
"sklearn.preprocessing.StandardScaler",
"sklearn.datasets.make_circles",
"sklearn.cluster.Birch",
"sklearn.cluster.MiniBatchKMeans",
"numpy.array",
"sklearn.datasets.make_blobs"
],
[
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.random.standard_normal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
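One data-preparation pattern recurs in the bokeh row above: les_mis.py builds a symmetric co-occurrence matrix from a link list and then flattens it row-major for plotting with rect(). A self-contained sketch of that step, with toy links standing in for bokeh.sampledata.les_mis:

import numpy as np

# Toy stand-in for data['links']; each link is written in both directions
# so the matrix stays symmetric, as in les_mis.py above.
links = [{'source': 0, 'target': 1, 'value': 3},
         {'source': 1, 'target': 2, 'value': 1}]
N = 3
counts = np.zeros((N, N))
for link in links:
    counts[link['source'], link['target']] = link['value']
    counts[link['target'], link['source']] = link['value']

print(counts.flatten())  # row-major flattening, as fed into the rect() data dict

The flattened counts stay aligned with the xname/yname columns because the example's nested i/j loop over nodes visits cells in the same row-major order that numpy's flatten() uses.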
KESHAmambo/IEEE_802_11ad_beamforming_simulation | [
"93328a41d9c044ee7596d02e360fb3b5f2250ec0",
"93328a41d9c044ee7596d02e360fb3b5f2250ec0"
] | [
"graph/ap (0, 0, 0), 12 sect, mob 4 sect/dynamic/degree variation, 3 slots, 25 stations/with 0/avg_dist.py",
"graph/ap (0, 0, 0), 12 sect, mob 4 sect/3 slots, 25 stations/hexbin.py"
] | [
"\"\"\"\nDistribution plot options\n=========================\n\n\"\"\"\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\navgArr0 = [684.0322959592726, 884.7363009861817, 888.8322884189091, 942.080300986182, 970.7522934458182, 991.2322959592727, 991.2323009861818, 1011.712300986182, 1036.288295959273, 1044.4803034996364, 1060.8642859054546, 1069.056306013091, 1077.2482984727276, 1122.304306013091, 1130.4963034996365, 1134.5923034996363, 1142.7843009861817, 1146.8802884189092, 1159.1682984727274, 1163.2643085265454, 1224.7042884189093, 1232.8963110400005, 1249.280295959273, 1261.5682934458184, 1265.6642833920002, 1282.0483060130912, 1290.2402984727278, 1298.4323110400003, 1331.2003034996367, 1351.680290932364, 1363.968290932364, 1363.9682959592733, 1409.0243009861824, 1409.024303499637, 1413.1202984727277, 1413.120306013091, 1429.5042884189097, 1429.5043060130913, 1470.464293445819, 1495.0402959592734, 1527.808285905455, 1556.4803034996369, 1593.3442934458185, 1613.8243009861826, 1634.3042859054556, 1638.400285905455, 1650.6882884189097, 1662.9763009861827, 1699.8403085265466, 1740.800303499637, 1757.1842959592736, 1757.1843009861827, 1761.2803160669096, 1781.7602909323643, 1794.0482959592732, 1794.0483034996373, 1802.2402909323644, 1810.432298472728, 1822.7202859054553, 1830.9122959592737, 1843.2003034996371, 1855.4882959592733, 1863.680295959274, 1867.7763009861828, 1892.3522909323647, 1908.736311040001, 1929.21628841891, 1982.4643135534557, 2002.944295959274, 2048.0002984727284, 2064.3842859054553, 2158.5922934458195, 2207.7442934458195, 2228.2242833920013, 2240.512300986183, 2269.1842884189105, 2285.568283392001, 2338.8162984727287, 2351.104295959274, 2416.6402984727292, 2424.832290932365, 2461.696298472729, 2478.080295959274, 2568.1922934458194, 2654.20829344582, 2715.648303499638, 2756.6083009861827, 2801.66429344582, 2936.8322909323656, 2990.0803160669107, 3043.3282984727293, 3117.0563210938194, 3125.2482959592744, 3170.304298472728, 3280.896278365093, 3379.200290932365, 3506.176295959273, 3850.240300986182, 3887.104285905455, 5201.920300986177]\navgArr1 = [262.14550912000004, 266.2412008029091, 270.33744209454545, 278.5297370065454, 278.529804032, 282.62589786763635, 286.72168338618184, 286.7217504116363, 286.72184424727277, 286.72195148800006, 286.72197829818185, 286.72203191854555, 286.7223670458182, 290.81752252509096, 290.81765657600005, 290.8178442472727, 290.81785765236367, 290.81824639999996, 294.9136699810909, 294.9136967912728, 294.91380403200003, 294.9138576523636, 294.91391127272726, 294.91393808290906, 294.91415256436363, 299.00956274036366, 299.0096699810909, 299.0097772218182, 299.0098710574546, 299.01011234909095, 299.01017937454543, 299.0102464, 299.0103268305455, 299.01039385600006, 303.1053348538182, 303.1056163607273, 303.1056833861818, 303.10569679127275, 303.1057101963637, 303.1058576523637, 303.10588446254553, 303.10589786763643, 303.1059112727273, 303.1060587287273, 303.10623299490913, 307.2014823098182, 307.20156274036367, 307.20166998109096, 307.20184424727273, 307.20213915927275, 307.2021659694545, 307.2021659694546, 307.20225980509093, 307.20235364072727, 307.2026753629091, 311.29757614545457, 311.29758955054547, 311.2976431709091, 311.29772360145455, 311.29780403200004, 311.2978710574546, 311.2978978676364, 311.2979112727273, 311.2979782981818, 311.29801851345456, 311.29825980509094, 311.2982732101819, 311.2986351476364, 315.3934152843637, 315.3934957149091, 315.3935225250909, 315.39369679127276, 315.39371019636366, 
315.39371019636366, 315.3937504116364, 315.39380403200005, 315.393951488, 315.3939514880001, 315.3939782981819, 315.39415256436365, 315.39484962909097, 319.4894689047273, 319.4895761454546, 319.48960295563637, 319.48961636072727, 319.489656576, 319.4897772218182, 319.4897906269091, 319.48981743709095, 319.48989786763644, 319.4900051083636, 319.49015256436377, 319.4905145018182, 319.4905547170909, 319.49078260363643, 319.4908094138182, 323.58536166400006, 323.5856163607273, 323.5856163607273, 323.58564317090907, 323.5856699810909, 323.5857638167273, 323.58580403200006, 323.5858174370909, 323.58585765236364, 323.5858576523637, 323.58589786763645, 323.5859380829091, 323.5860185134545, 323.5860721338182, 323.5860721338182, 323.58608553890906, 323.58612575418186, 323.58613915927276, 323.58613915927276, 323.58613915927276, 323.58625980509095, 323.5864340712728, 323.5865145018182, 323.5865681221818, 323.58679600872733, 327.6815493352728, 327.6816967912727, 327.68169679127277, 327.68179062690905, 327.68187105745454, 327.68192467781824, 327.6819380829092, 327.68196489309094, 327.6820319185455, 327.68204532363643, 327.68205872872727, 327.68205872872727, 327.68207213381817, 327.68208553890906, 327.68213915927277, 327.6821927796364, 327.68220618472725, 327.68228661527274, 327.68232683054543, 327.6823268305455, 327.6823536407273, 327.6823670458182, 327.6824742865455, 327.6824876916364, 327.6831579461819, 331.77730804363637, 331.77741528436366, 331.77741528436366, 331.77742868945455, 331.77757614545453, 331.77758955054543, 331.7776297658182, 331.77777722181816, 331.77791127272735, 331.77792467781813, 331.77797829818184, 331.77797829818184, 331.7779917032728, 331.7779917032728, 331.77800510836363, 331.7780453236363, 331.77805872872733, 331.7781525643636, 331.77819277963636, 331.7782061847273, 331.77842066618183, 331.7784340712727, 331.7784340712728, 331.7790909207274, 335.873656576, 335.873656576, 335.8736967912727, 335.87379062690917, 335.8738308421818, 335.87388446254545, 335.87388446254556, 335.8738978676364, 335.8739246778183, 335.87393808290915, 335.8739782981818, 335.8739917032728, 335.87400510836363, 335.8740587287273, 335.87416596945457, 335.8741793745454, 335.8741793745455, 335.87419277963636, 335.87421958981815, 335.87423299490916, 335.8743134254546, 335.8743402356364, 335.87440726109094, 335.87450109672733, 339.96957614545454, 339.969656576, 339.96966998109093, 339.9696833861819, 339.9696833861819, 339.9696967912727, 339.96975041163637, 339.9697772218182, 339.9697772218182, 339.96983084218186, 339.96984424727276, 339.96988446254545, 339.9699246778182, 339.969951488, 339.969951488, 339.9699782981818, 339.96999170327274, 339.97000510836364, 339.97001851345453, 339.9700587287273, 339.9700587287273, 339.97007213381823, 339.9701525643637, 339.9701659694546, 339.9703000203636, 344.0654823098182, 344.0656163607273, 344.06565657600004, 344.0657236014545, 344.0658308421818, 344.06587105745456, 344.0659380829091, 344.0659917032727, 344.06604532363633, 344.0661391592727, 344.0661927796364, 344.06628661527276, 344.06632683054545, 344.06643407127274, 344.06690324945464, 344.06718475636364, 348.1612946385454, 348.16176381672733, 348.1618576523636, 348.16187105745456, 348.16191127272725, 348.1619112727273, 348.1619112727273, 348.16192467781815, 348.1619917032728, 348.16200510836364, 348.1620319185455, 348.16207213381824, 348.1621257541819, 348.1621659694546, 348.1621793745455, 348.1622061847273, 348.16225980509097, 348.1623804509091, 348.162688768, 352.25761636072735, 352.2577236014546, 352.2577370065454, 
352.25788446254546, 352.2578978676364, 352.2579380829091, 352.2579380829091, 352.25801851345454, 352.25803191854544, 352.2580453236364, 352.25805872872735, 352.2580855389091, 352.25811234909094, 352.2581257541819, 352.2581659694546, 352.2581793745454, 352.25817937454553, 352.2582329949091, 352.2582464000001, 352.2582598050909, 352.2582866152727, 352.2583536407272, 352.2584072610909, 352.2586083374546, 352.258702173091, 352.25884962909106, 356.35349571490906, 356.35364317090904, 356.3537370065455, 356.35380403199997, 356.3538174370909, 356.3538710574545, 356.3538710574546, 356.35388446254547, 356.35393808290905, 356.3540051083637, 356.35401851345455, 356.3540721338182, 356.35411234909094, 356.3541793745455, 356.3542732101819, 356.35428661527277, 356.3543134254545, 356.354393856, 356.35444747636365, 356.3544876916364, 356.35450109672723, 356.3545279069091, 356.35452790690914, 356.35466195781817, 356.3552517818182, 360.4494554996363, 360.4495627403636, 360.4497370065455, 360.4497638167273, 360.44977722181824, 360.44988446254547, 360.44989786763637, 360.44995148800007, 360.45012575418184, 360.45013915927274, 360.4501659694546, 360.4501927796363, 360.4502464, 360.45032683054546, 360.4503268305455, 360.4503536407273, 360.450393856, 360.45043407127275, 360.45051450181825, 360.45051450181825, 360.45087643927286, 360.4510641105454, 360.4515332887273, 364.54554933527265, 364.5456699810909, 364.54568338618185, 364.5457370065454, 364.54573700654544, 364.5458174370908, 364.5458576523636, 364.54589786763637, 364.54597829818186, 364.54607213381814, 364.54616596945453, 364.54617937454555, 364.5462732101819, 364.5463134254545, 364.5463402356363, 364.5463670458182, 364.5463804509091, 364.54643407127276, 364.5464474763636, 364.5464742865455, 364.5465010967273, 364.5465547170909, 364.54660833745453, 364.54684962909096, 364.5469032494546, 368.64156274036367, 368.6416431709091, 368.6418174370909, 368.6419246778181, 368.64204532363635, 368.6420721338182, 368.64209894399994, 368.64211234909095, 368.64225980509093, 368.6422732101818, 368.6422866152727, 368.6423536407272, 368.6423536407273, 368.6423804509091, 368.6424340712727, 368.6424340712727, 368.64244747636366, 368.6426351476364, 368.6426619578182, 368.64267536290913, 368.64303730036363, 368.6433590225455, 372.7374957149091, 372.73772360145455, 372.73773700654544, 372.7377504116364, 372.7378844625455, 372.7379246778182, 372.73801851345456, 372.73811234909095, 372.73813915927275, 372.73823299490914, 372.7382866152727, 372.7385279069091, 372.73856812218185, 372.73862174254543, 372.73864855272734, 372.738688768, 372.7387960087272, 372.73891665454545, 376.83368338618175, 376.83377722181814, 376.8338710574546, 376.8339246778181, 376.83392467781823, 376.83396489309087, 376.8339782981819, 376.8339782981819, 376.83412575418185, 376.8342329949091, 376.83428661527273, 376.83436704581817, 376.8344072610909, 376.83447428654546, 376.8347289832728, 376.83487643927276, 376.8353054021819, 380.9296967912727, 380.92991127272734, 380.9299380829091, 380.9300453236364, 380.93013915927276, 380.93016596945455, 380.93021958981814, 380.93031342545453, 380.9303268305455, 380.9305547170909, 380.9307423883636, 380.93082281890906, 380.93119816145446, 385.02557614545447, 385.02576381672736, 385.02577722181815, 385.02588446254543, 385.02588446254543, 385.02612575418175, 385.0262464, 385.02638045090913, 385.02640726109087, 385.0264474763636, 385.0265815272727, 385.02667536290903, 385.0267960087273, 385.0269434647273, 389.1215761454546, 389.1217772218182, 389.1220185134545, 389.1228630341818, 
389.1233188072728, 393.2177772218181, 393.21777722181815, 393.21792467781813, 393.21792467781825, 393.21808553890907, 393.21891665454547, 397.3138442472728, 397.3139246778182, 397.3139782981819, 397.3139917032726, 397.31401851345464, 397.3140855389091, 397.314098944, 397.31419277963636, 397.3143268305455, 397.3144072610908, 397.3145547170908, 397.3146753629091, 397.31507751563635, 401.40975041163637, 401.40975041163637, 401.4098576523637, 401.40996489309094, 401.4100721338182, 401.41028661527275, 401.4103670458182, 401.4103804509091, 401.4105547170909, 401.4106083374546, 405.5054689047273, 405.50566998109093, 405.5059246778182, 405.50609894400003, 405.50627321018175, 405.5071177309091, 405.50729199709093, 409.60160295563634, 409.6025681221818, 409.6026083374545, 409.6028496290908, 413.69839385600005, 413.69887643927274, 417.79372360145453, 417.79440726109084, 421.8898308421817, 421.8904206661819, 425.9858174370908, 425.98608553890915, 430.0823134254546, 434.178688768, 438.2742061847272, 442.37005872872726, 446.4660319185454, 446.46613915927264, 446.4663134254545, 462.850232994909, 475.13820618472715, 479.2343000203636]\navgArr3 = [159.74953064727276, 159.7500266356364, 159.75250657745457, 159.7535253643637, 163.8445386705455, 163.8447129367273, 163.84507487418185, 163.84571831854552, 163.84864062836368, 163.84909640145463, 163.8494583389091, 163.85055755636367, 167.94163788800003, 167.94210706618185, 167.9431928785455, 167.94330011927275, 167.9443725265455, 167.94441274181824, 167.94531088290915, 167.9473082414546, 167.9500026647273, 167.95037800727277, 172.03707487418185, 172.03794620509095, 172.03813387636364, 172.03899180218184, 172.0390856378182, 172.040292096, 172.04031890618188, 172.04077467927277, 172.04124385745456, 172.04187389672734, 172.04312057018186, 172.0431741905455, 172.04322781090914, 172.0434959127273, 172.0440723316364, 172.04462194036364, 172.04608309527276, 176.13269953163638, 176.1335708625455, 176.1337585338182, 176.13471029527278, 176.13515266327278, 176.13523309381824, 176.13544757527276, 176.13614464000003, 176.13674786909098, 176.13775325090916, 176.13788730181818, 176.138061568, 176.1380749730909, 176.13836988509095, 176.13899992436365, 176.13899992436367, 176.13908035490914, 176.14080961163643, 176.14094366254548, 176.1420160698182, 176.14225736145454, 176.1424182225455, 176.1428471854545, 176.14304826181822, 176.1435442501819, 176.14471049309097, 180.22835099927272, 180.22943681163642, 180.22957086254547, 180.2298791796364, 180.23008025600004, 180.23013387636365, 180.23036176290913, 180.23048240872728, 180.23111244800006, 180.2311794734546, 180.23139395490915, 180.23143417018184, 180.2321312349091, 180.23237252654548, 180.23238593163637, 180.23332428800006, 180.2334181236364, 180.2336728203637, 180.23408837818184, 180.23438329018182, 180.23477203781823, 180.23482565818185, 180.23483906327277, 180.23506694981822, 180.23517419054548, 180.23545569745457, 180.23545569745457, 180.23579082472727, 180.23703749818185, 180.23715814400003, 180.24111264581822, 180.24152820363642, 184.32385501090914, 184.32453867054548, 184.325490432, 184.32549043200004, 184.32557086254548, 184.3255976727273, 184.3261338763637, 184.32641538327277, 184.32646900363642, 184.32649581381818, 184.32668348509094, 184.32708563781824, 184.32716606836365, 184.32731352436366, 184.32786313309094, 184.32787653818184, 184.3279435636364, 184.32803739927277, 184.3281044247273, 184.32814464000003, 184.32817145018188, 184.3282518807273, 184.328292096, 184.3283189061818, 184.32833231127276, 
184.32843955200002, 184.3286808436364, 184.3288148945455, 184.32897575563638, 184.32932428800004, 184.3296728203637, 184.32988730181825, 184.33074522763638, 184.33082565818185, 184.3312010007273, 184.33137526690913, 184.33205892654547, 184.33228681309092, 184.3323940538182, 184.33366753745457, 184.33380158836368, 184.33503485672728, 184.33559787054548, 184.33983387927273, 188.42017673309093, 188.42104806400005, 188.4211821149091, 188.42198642036365, 188.4220400407273, 188.42297839709096, 188.42304542254547, 188.42320628363643, 188.42338054981818, 188.42344757527277, 188.42362184145455, 188.42367546181825, 188.42375589236366, 188.4237827025455, 188.42383632290915, 188.4242116654546, 188.4244931723637, 188.42466743854547, 188.4249087301818, 188.4250829963637, 188.42509640145457, 188.42564601018185, 188.42586049163637, 188.42615540363636, 188.42639669527273, 188.42670501236367, 188.42721440581823, 188.42746910254547, 188.4274959127273, 188.4283538385455, 188.42835383854552, 188.42897047272726, 188.42919835927273, 188.43051205818188, 188.43161127563639, 188.43209385890913, 188.43289816436365, 192.51616332800003, 192.51671293672734, 192.51679336727275, 192.5171418996364, 192.51747702690912, 192.5178657745455, 192.5178657745455, 192.51791939490911, 192.51821430690913, 192.5184421934546, 192.51850921890917, 192.51850921890917, 192.51860305454545, 192.51939395490916, 192.5199301585455, 192.52001058909096, 192.52023847563635, 192.5202384756364, 192.5205333876364, 192.5205736029091, 192.520587008, 192.5206808436364, 192.52072105890915, 192.52086851490913, 192.52092213527274, 192.5214315287273, 192.5216728203637, 192.52206156800003, 192.52231626472735, 192.52298651927273, 192.52326802618182, 192.52365677381823, 192.52365677381823, 192.52403211636363, 192.52447448436368, 192.52448788945455, 192.52484982690913, 192.52795980800002, 192.52812066909092, 192.53021186327277, 196.6119890618182, 196.61229737890915, 196.61248505018185, 196.6129274181818, 196.61296763345453, 196.61350383709097, 196.61377193890917, 196.61383896436368, 196.61416068654546, 196.6143081425455, 196.61440197818186, 196.61446900363634, 196.6153269294546, 196.61544757527278, 196.6156620567273, 196.61567546181826, 196.6159703738182, 196.61609101963637, 196.61668084363635, 196.61676127418184, 196.61708299636368, 196.61727066763638, 196.6174047185455, 196.61776665600001, 196.6180347578182, 196.61808837818185, 196.61877203781825, 196.61894630400005, 196.61925462109096, 196.6196031534546, 196.61962996363636, 196.619683584, 196.61991147054547, 196.62042086400007, 196.62339679418187, 196.62456303709092, 200.70671557818187, 200.70715794618184, 200.7082571636364, 200.70883358254548, 200.70918211490908, 200.7094502167273, 200.71006685090913, 200.71010706618188, 200.71085775127275, 200.71123309381815, 200.71127330909093, 200.7113269294546, 200.71150119563637, 200.7115011956364, 200.71188994327272, 200.71219826036366, 200.71273446400002, 200.71292213527278, 200.7130427810909, 200.71321704727274, 200.7144369105455, 200.7145977716364, 200.71463798690914, 200.714651392, 200.71506694981818, 200.7159114705455, 200.71611254690913, 200.7162063825455, 200.7166889658182, 200.71756029672727, 200.72175609018186, 204.80346626327275, 204.8042705687273, 204.8047263418182, 204.8049944436364, 204.80641538327274, 204.80724649890917, 204.80763524654552, 204.80778270254544, 204.8082116654546, 204.80996773236362, 204.8101017832727, 204.810209024, 204.81036988509095, 204.81038329018185, 204.81061117672735, 204.81089268363633, 204.81220638254547, 204.81302409309095, 
204.81303749818187, 204.81329219490908, 204.81342624581828, 204.81382839854544, 204.8139758545455, 208.90033759418188, 208.90136978618182, 208.9019998254546, 208.9021472814546, 208.90256283927272, 208.9026566749091, 208.90267008000004, 208.90287115636372, 208.9032733090909, 208.90398377890912, 208.9041044247273, 208.9060749730909, 208.90649053090914, 208.90691949381824, 208.9072546210909, 208.9085281047273, 208.91110188218184, 212.99542604800007, 212.99610970763638, 212.99632418909093, 212.9963912145455, 212.9965922909091, 212.99660569600005, 212.99677996218182, 212.99696763345457, 212.99698103854553, 212.99706146909094, 212.99722233018184, 212.99723573527274, 212.99723573527277, 212.99761107781822, 212.99763788800007, 212.99824111709094, 212.99883094109092, 212.99904542254546, 212.99978270254545, 213.00015804509093, 213.00017145018185, 213.00039933672733, 213.00049317236363, 213.0005065774546, 213.00133769309093, 213.00143152872732, 213.00186049163642, 213.00188730181824, 213.0031607854546, 213.00341548218185, 213.0037506094546, 213.00439405381826, 213.00525197963637, 217.09126518690908, 217.09137242763637, 217.09151988363638, 217.0919890618182, 217.09210970763638, 217.09272634181823, 217.09357086254545, 217.0935976727273, 217.0940132305455, 217.09429473745456, 217.09475051054545, 217.09832966981818, 217.09862458181814, 217.0990133294546, 217.1058365207273, 221.18764052945463, 221.1884314298182, 221.18852526545456, 221.18860569600005, 221.18880677236368, 221.18919552000003, 221.18920892509092, 221.1894368116364, 221.18945021672732, 221.18982555927275, 221.19176929745456, 221.1949328989091, 221.19501332945455, 221.19509376000005, 225.28317135127276, 225.28388182109092, 225.28443142981823, 225.285048064, 225.28567810327277, 225.2867639156364, 225.28721968872733, 225.28811782981825, 225.28873446400004, 225.28916342690914, 225.29230021818188, 229.37842066618188, 229.3793188072727, 229.37976117527273, 229.37981479563638, 229.38024375854548, 229.38043142981826, 229.38071293672735, 229.38102125381826, 229.38145021672727, 229.38230814254553, 229.38273710545462, 229.38289796654553, 229.38358162618192, 229.38438593163642, 229.38449317236365, 229.38464062836366, 233.47510432581822, 233.47554669381825, 233.4758416058182, 233.4763509992728, 233.4773161658182, 233.47742340654548, 233.47766469818188, 233.4782411170909, 233.47857624436364, 233.47864326981824, 233.47899180218184, 233.47919287854546, 233.47928671418188, 233.47940736000004, 233.4816728203637, 233.48324121600007, 233.48647184290917, 237.57224375854545, 237.5724046196364, 237.57311508945463, 237.57345021672734, 237.57414728145457, 237.57475051054544, 237.57477732072726, 237.57491137163638, 237.5751794734546, 237.57590334836365, 241.66725178181824, 241.66825716363638, 241.6682571636364, 241.66869953163638, 241.67042878836364, 241.67167546181827, 241.6718899432728, 241.67336450327267, 241.6745173410909, 245.7634126429091, 245.7635466938182, 245.7645118603637, 245.76549043199998, 245.76605344581822, 245.76764865163636, 245.7682786909091, 249.85941264290912, 249.86016332800006, 249.86052526545456, 249.86094082327276, 249.8609542283637, 249.8611687098182, 249.86340736000002, 249.8661151883636, 249.8665709614546, 253.95542604799996, 253.9591794734546, 253.96553348654547, 258.05137242763635, 258.0516673396364, 262.1471043258182, 262.1472383767273, 262.1495842676364, 262.1515011956364, 266.244310784, 266.24467272145455, 266.2475414109091, 270.3394662632727, 270.3405252654546, 270.34075315200005, 270.34336714472727, 274.43581479563636, 
274.4368067723636, 278.5308898443637, 278.53145285818186, 282.6261525643637, 282.6286459112727, 286.7246191010909, 286.7248737978182, 290.8225092189091, 290.8246406283636, 299.0112651869091, 299.0121901381819, 303.10796225163637, 303.11103201745453, 303.11426264436363, 311.3006727214546, 327.68323837672733, 348.1659596101818]\navgArr5 = [122.88727330909094, 126.99153084509094, 135.17265931636368, 135.17791411200002, 135.1780481629091, 135.18060853527274, 135.18133241018185, 135.18613143272728, 135.1867480669091, 135.18723065018187, 135.2095769367273, 135.2254351592728, 139.27024111709096, 139.27369963054548, 139.28205100218184, 139.2851609832728, 139.2853218443637, 139.32260140218187, 139.32307058036366, 143.36506146909093, 143.36512849454547, 143.36616068654544, 143.36862722327277, 143.37016880872727, 143.3707318225455, 143.372125952, 143.37226000290912, 143.3749008058182, 143.37817164800003, 143.37939151127276, 143.38065158981823, 143.3907992436364, 143.39259552581822, 147.46077996218185, 147.4625896494546, 147.4636620567273, 147.46465403345456, 147.4646540334546, 147.46478808436368, 147.46567282036364, 147.4675227229091, 147.46941284072727, 147.47009650036367, 147.4701367156364, 147.47317967127276, 147.4749625483637, 147.47626284218182, 147.47655775418184, 147.4769465018182, 147.47706714763638, 147.47775080727274, 147.47791166836365, 147.4791315316364, 147.47949346909093, 147.47969454545458, 147.47977497600004, 147.48181254981822, 147.48734885236365, 147.48875638690907, 147.4919870138182, 147.49330071272732, 147.49458760145455, 147.5109954327273, 151.55514454109093, 151.5573161658182, 151.55753064727276, 151.55865667490912, 151.55956822109093, 151.55967546181822, 151.5599703738182, 151.56003739927274, 151.56097575563638, 151.56156557963638, 151.5621285934546, 151.5627318225455, 151.56299992436368, 151.56388466036367, 151.56392487563642, 151.5643538385455, 151.56444767418188, 151.56550667636367, 151.56554689163642, 151.5657077527273, 151.5660026647273, 151.56639141236366, 151.56735657890914, 151.56801342836366, 151.56806704872733, 151.5709491432728, 151.57304033745461, 151.57322800872734, 151.57517174690915, 151.57531920290913, 151.57594924218182, 151.5764854458182, 151.57652566109095, 151.5797294778182, 151.58687439127274, 151.58824171054547, 151.59039993018186, 151.5907618676364, 151.62749181672731, 151.63096373527273, 151.64434201600005, 155.65066195781822, 155.6513188072728, 155.65338319127275, 155.65468348509094, 155.6547907258182, 155.6548845614546, 155.65601058909095, 155.65614464, 155.65618485527276, 155.65621166545458, 155.65623847563637, 155.65698916072728, 155.65795432727276, 155.65887927854553, 155.65944229236365, 155.65950931781822, 155.65971039418184, 155.66055491490914, 155.6607425861819, 155.66078280145456, 155.66079620654548, 155.66149327127275, 155.66200266472734, 155.66231098181822, 155.66288740072733, 155.6644692014546, 155.6649651898182, 155.66605100218186, 155.66688211781823, 155.66706978909096, 155.66711000436368, 155.66890628654545, 155.6697910225455, 155.66999209890915, 155.67062213818184, 155.6709706705455, 155.67154708945458, 155.6716409250909, 155.67170795054548, 155.67272673745458, 155.6741074618182, 155.67453642472728, 155.6746972858182, 155.6755418065455, 155.67587693381822, 155.6771370123637, 155.67900032000003, 155.67916118109096, 155.68450981236364, 155.68701656436366, 155.6894428858182, 155.69512664436365, 155.69571646836366, 155.70232517818184, 155.72153467345456, 159.74784160581822, 159.7484180247273, 159.74844483490912, 159.7485252654546, 
159.7487665570909, 159.74922233018188, 159.7496915083637, 159.7508041309091, 159.75111244800004, 159.75119287854548, 159.75135373963641, 159.75156822109093, 159.75160843636363, 159.75257360290914, 159.75296235054552, 159.75376665600004, 159.75402135272728, 159.7540347578182, 159.75435648000004, 159.7547854429091, 159.75600530618186, 159.75605892654548, 159.7563672436364, 159.75671577600002, 159.7567962065455, 159.75768094254548, 159.7581367156364, 159.75820374109094, 159.7582171461818, 159.75835119709092, 159.75839141236364, 159.75851205818182, 159.7590214516364, 159.75923593309093, 159.75934317381822, 159.75941019927276, 159.759664896, 159.75981235200004, 159.76029493527275, 159.76101881018184, 159.7619571665455, 159.76261401600004, 159.7627346618182, 159.76280168727274, 159.76514757818185, 159.76530843927276, 159.76615296000003, 159.76632722618183, 159.76632722618183, 159.76634063127278, 159.76710472145456, 159.76809669818184, 159.76963828363637, 159.77069728581824, 159.77146137600002, 159.77197076945455, 159.77631401890912, 159.78150178909092, 159.7834455272728, 159.79100599854547, 159.79164944290912, 159.80555052218185, 159.80632801745458, 159.81101979927274, 159.81839259927276, 159.82126128872733, 159.82929093818188, 163.84290324945457, 163.84333221236366, 163.8439622516364, 163.8441365178182, 163.8442839738182, 163.84480677236365, 163.84507487418185, 163.84507487418185, 163.84510168436364, 163.84524914036368, 163.84549043200005, 163.84565129309095, 163.84569150836364, 163.8456915083637, 163.84612047127277, 163.84701861236368, 163.8470588276364, 163.84780951272734, 163.8478363229091, 163.84787653818185, 163.84831890618185, 163.84834571636367, 163.84851998254547, 163.8486272232727, 163.8488148945455, 163.84916342690914, 163.8494047185455, 163.8505843665455, 163.85085246836363, 163.8513484567273, 163.85258172509094, 163.85335922036367, 163.85475334981822, 163.85479356509094, 163.85495442618182, 163.85522252800004, 163.85530295854548, 163.8574209629091, 163.85764884945456, 163.85823867345454, 163.85849337018186, 163.85908319418186, 163.86113417309093, 163.8612950341818, 163.86221998545457, 163.86342644363634, 163.8642843694546, 163.86546401745457, 163.86557125818183, 163.8657053090909, 163.8665632349091, 163.86663026036368, 163.86687155200005, 163.87108075054547, 163.87274298181822, 163.87445883345455, 163.87945893236363, 163.8803570734546, 163.8808396567273, 163.88456627199997, 163.88503545018185, 163.88522312145454, 163.8960142196364, 163.9115373149091, 163.92691295418186, 167.93929199709092, 167.93930540218184, 167.94035099927274, 167.94079336727273, 167.94083358254545, 167.94108827927275, 167.9411418996364, 167.94147702690913, 167.9436084363637, 167.9439837789091, 167.94414464000005, 167.9446004130909, 167.94466743854548, 167.9447210589091, 167.94493554036364, 167.94498916072732, 167.94560579490908, 167.94636988509095, 167.94659777163642, 167.94673182254547, 167.94673182254547, 167.94768358400003, 167.94787125527276, 167.9482600029091, 167.94902409309097, 167.9501233105455, 167.95063270400004, 167.95112869236368, 167.95189278254549, 167.9526032523637, 167.95273730327278, 167.9528981643637, 167.95657115927276, 167.95925217745454, 167.96008329309095, 167.96037820509096, 167.96074014254546, 167.9621610821819, 167.96364904727278, 167.9638367185455, 167.9645739985455, 167.96582067200004, 167.9669332945455, 167.96706734545455, 167.9675365236364, 167.96929259054545, 167.97245619200004, 167.97885042036364, 167.97944024436364, 167.98515081309094, 167.98781842618183, 167.99146461090916, 
168.00708154181822, 168.0103255738182, 172.03431342545457, 172.03541264290914, 172.03668612654548, 172.03690060800002, 172.03696763345454, 172.03795961018187, 172.03838857309097, 172.03852262400002, 172.03919287854552, 172.03919287854552, 172.03983632290914, 172.03986313309096, 172.03993015854547, 172.0402652858182, 172.0406540334546, 172.04223583418184, 172.04337526690912, 172.04545305600004, 172.0488043287273, 172.04931372218184, 172.0495416087273, 172.04994376145459, 172.05015824290913, 172.0538982632728, 172.05478299927276, 172.05596264727276, 172.05809405672736, 172.0583487534546, 172.05934073018187, 172.06065442909096, 172.0626249774546, 172.06464914618184, 172.07540002909093, 172.08361734981818, 172.11956980363638, 176.13113113600005, 176.1312115665455, 176.13184160581818, 176.13188182109093, 176.1331687098182, 176.1335976727273, 176.1350588276364, 176.1350990429091, 176.1352464989091, 176.13530011927273, 176.1356888669091, 176.13669424872728, 176.13929483636366, 176.14282037527275, 176.14409385890914, 176.1467212567273, 176.1467882821819, 176.1487454254546, 176.14921460363638, 176.15212350836364, 176.15534073018182, 180.2269166545455, 180.227439453091, 180.22745285818186, 180.22860569600002, 180.22890060800003, 180.22943681163642, 180.2296915083637, 180.23065667490908, 180.23112585309093, 180.23154141090913, 180.23225188072732, 180.2334851490909, 180.2336862254546, 180.23485246836367, 180.2360857367273, 180.2372251694546, 180.24005364363643, 180.24919591563642, 180.25608613236363, 180.28203838836365, 184.32238045090912, 184.32401587200002, 184.32504806400004, 184.32658964945455, 184.3267907258182, 184.32744757527274, 184.32817145018183, 184.32822507054544, 184.32900256581823, 184.3291232116364, 184.32920364218188, 184.32932428800007, 184.33256832000004, 184.3363887709091, 188.41842066618185, 188.42017673309093, 188.42031078400007, 188.4208201774546, 188.4210078487273, 188.42108827927277, 188.42149043199998, 188.42395696872734, 188.42410442472735, 188.42563260509095, 188.4287828014546, 188.43204023854545, 188.44147742254552, 188.44725501672733, 192.51522497163637, 192.5152919970909, 192.5171553047273, 192.5171687098182, 192.51747702690915, 192.51749043200005, 192.51782555927278, 192.51904542254553, 192.52042614690913, 196.61196225163638, 196.61290060800005, 196.6144019781818, 196.61512585309094, 196.61617145018187, 196.61670765381822, 196.61744493381818, 196.62150667636368, 200.70670217309092, 200.70691665454547, 200.7090480640001, 200.70953064727274, 204.80270217309098, 204.80441802472728, 204.80482017745462, 204.80589258472727, 208.89907751563638, 208.90005608727276, 208.90161107781827, 212.99424640000004, 212.99506411054546, 212.9953188072728, 212.9955198836364, 212.9963644043637, 212.9970614690909, 217.09036704581823, 217.09048769163638, 217.09149307345461, 217.0955816261818, 217.09843691054547, 221.1907505105455, 225.28234023563644, 225.2841767330909, 225.28447164509092, 225.28566469818188, 229.37883622400005, 229.3835146007273, 237.57062174254548, 241.66789522618186, 249.86256283927273, 262.1457504116364, 270.3390775156364]\navgArr7 = [118.78773436509093, 118.78825716363637, 118.78853867054544, 126.97961371927276, 131.0759488465455, 131.07596225163638, 131.07598906181823, 131.07601587200003, 131.0760694923637, 131.07696763345456, 135.17160031418183, 135.17201587200003, 135.1721231127273, 135.17216332800004, 135.17239121454546, 135.1724180247273, 139.267573504, 139.2676941498182, 139.2678416058182, 139.26786841600003, 139.26797565672732, 139.26806949236365, 139.2680963025455, 
139.2681365178182, 139.26814992290912, 139.26819013818184, 139.26831078400002, 139.2686995316364, 139.26891401309095, 139.26894082327274, 143.36364052945456, 143.36365393454548, 143.36369414981823, 143.3637075549091, 143.36372096000002, 143.36373436509092, 143.36374777018185, 143.3638416058182, 143.3641365178182, 143.36424375854548, 143.36431078400003, 143.36443142981818, 147.45957350400005, 147.45980139054544, 147.45981479563636, 147.45990863127273, 147.46010970763638, 147.46020354327277, 147.46028397381818, 147.46031078400003, 147.46035099927275, 147.4604046196364, 147.46048505018186, 147.46049845527273, 147.46051186036365, 147.46064591127276, 147.4608469876364, 147.4610212538182, 147.461048064, 147.55354319127275, 151.55501049018184, 151.5552249716364, 151.55564052945456, 151.55564052945456, 151.55592203636368, 151.55602927709094, 151.55608289745456, 151.5561365178182, 151.55614992290913, 151.55621694836367, 151.55623035345457, 151.55631078400003, 151.55632418909096, 151.55633759418185, 151.5564314298182, 151.55644483490912, 151.55644483490914, 151.5565788858182, 151.55676655709092, 151.5568872029091, 155.65150647854546, 155.6515466938182, 155.65158690909092, 155.6517343650909, 155.6518282007273, 155.65185501090912, 155.6519756567273, 155.65202927709095, 155.65216332800003, 155.65224375854547, 155.65228397381821, 155.65237780945455, 155.6524180247273, 155.6524314298182, 155.65244483490912, 155.65265931636367, 155.6528872029091, 155.65306146909094, 155.65312849454548, 159.74719816145458, 159.74733221236366, 159.74734561745453, 159.7474394530909, 159.74750647854546, 159.7476539345455, 159.7477075549091, 159.74773436509093, 159.74778798545458, 159.74778798545458, 159.74781479563637, 159.7478147956364, 159.7478282007273, 159.74785501090912, 159.74797565672728, 159.74804268218185, 159.74804268218185, 159.74808289745457, 159.7480828974546, 159.74812311272728, 159.7481365178182, 159.7481767330909, 159.74835099927273, 159.74836440436366, 159.74840461963637, 159.74847164509092, 159.74847164509094, 159.74848505018187, 159.74860569600003, 159.74860569600003, 159.748753152, 163.84314454109096, 163.84326518690912, 163.8435198836364, 163.84353328872731, 163.8435600989091, 163.84361371927275, 163.84364052945458, 163.84370755490912, 163.84372095999998, 163.8437611752728, 163.8437745803637, 163.84380139054548, 163.8438550109091, 163.84389522618184, 163.84397565672728, 163.84405608727278, 163.84406949236364, 163.84409630254547, 163.8442571636364, 163.84429737890912, 163.84429737890912, 163.84436440436366, 163.8443912145455, 163.84447164509092, 163.84448505018182, 163.84452526545454, 163.8445520756364, 163.8445654807273, 163.84461910109093, 163.84463250618185, 163.8447129367273, 163.84473974690914, 163.84494082327274, 163.8451418996364, 167.93898367999998, 167.93901049018183, 167.9392249716364, 167.93947966836367, 167.9395332887273, 167.9395466938182, 167.93957350400004, 167.9396539345455, 167.9396941498182, 167.93974777018187, 167.93977458036363, 167.93985501090913, 167.93988182109092, 167.93988182109092, 167.940015872, 167.94005608727278, 167.9401767330909, 167.94019013818186, 167.94021694836366, 167.94027056872733, 167.94035099927274, 167.94047164509095, 167.94049845527275, 167.9405118603637, 167.94052526545457, 167.9405922909091, 167.94061910109096, 167.94068612654547, 167.94069953163637, 167.94076655709094, 167.94077996218184, 167.94077996218186, 167.94090060800005, 167.94132957090915, 172.03518475636366, 172.03549307345457, 172.03550647854544, 172.03562712436369, 172.03568074472733, 
172.03569414981823, 172.03572096, 172.03581479563638, 172.03582820072725, 172.0358282007273, 172.035868416, 172.03601587200004, 172.03606949236365, 172.03609630254547, 172.03609630254547, 172.0361231127273, 172.03619013818184, 172.0362035432727, 172.03621694836366, 172.03623035345458, 172.0362705687273, 172.03631078400002, 172.0363375941819, 172.03637780945456, 172.0364046196364, 172.0364180247273, 172.03647164509093, 172.03648505018188, 172.0365922909091, 172.03663250618186, 172.03665931636365, 172.03668612654548, 172.036753152, 172.0369542283637, 172.03712849454547, 172.03763788800003, 176.13088984436362, 176.13099708509094, 176.13111773090912, 176.13122497163636, 176.13131880727278, 176.13146626327273, 176.13160031418184, 176.13161371927276, 176.13161371927276, 176.13165393454543, 176.13172096000002, 176.1318147956364, 176.1318147956364, 176.132015872, 176.13201587200004, 176.13205608727276, 176.13208289745455, 176.13241802472731, 176.13249845527275, 176.13259229090912, 176.1327263418182, 176.1328469876364, 176.13295422836364, 176.13296763345454, 180.2268094138182, 180.22719816145457, 180.2273724276364, 180.22753328872727, 180.2276539345455, 180.22766733963638, 180.22766733963638, 180.22797565672732, 180.22804268218187, 180.22809630254548, 180.22816332800002, 180.22817673309092, 180.22817673309095, 180.22832418909093, 180.22839121454547, 180.22840461963634, 180.2284180247273, 180.2284180247273, 180.22843142981822, 180.22851186036365, 180.22852526545458, 180.2285788858182, 180.2286191010909, 180.2286861265455, 180.22872634181823, 180.22879336727274, 180.22886039272726, 180.22902125381822, 180.22916870981823, 180.22983896436367, 184.32290324945453, 184.32346626327273, 184.32351988363644, 184.32360031418182, 184.32370755490913, 184.32370755490916, 184.32378798545454, 184.32380139054544, 184.3238282007273, 184.32385501090914, 184.32389522618186, 184.3239086312728, 184.32397565672727, 184.32400246690915, 184.32404268218187, 184.32425716363642, 184.32433759418186, 184.32445824000007, 184.32447164509097, 184.32448505018183, 184.32449845527276, 184.32455207563638, 184.32459229090912, 184.32461910109095, 184.32468612654546, 184.32482017745457, 184.32486039272732, 184.32490060800006, 184.32511508945456, 184.32542340654544, 188.41918475636365, 188.4192115665455, 188.41954669381823, 188.41964052945457, 188.4196539345455, 188.41968074472732, 188.4197075549091, 188.41985501090912, 188.4199220363637, 188.4200963025455, 188.42013651781818, 188.4201499229091, 188.42023035345457, 188.42024375854547, 188.42027056872732, 188.4204046196364, 188.42060569600005, 188.42060569600005, 188.42064591127274, 188.4206459112728, 188.42076655709093, 188.42080677236368, 188.42087379781822, 188.42088720290914, 188.42092741818186, 188.4210882792728, 188.4212893556364, 188.42174512872734, 188.4393192029091, 192.51505070545457, 192.5150775156364, 192.51554669381818, 192.51585501090915, 192.51590863127274, 192.51610970763636, 192.5161097076364, 192.5161231127273, 192.51617673309093, 192.51620354327272, 192.5162571636364, 192.51629737890912, 192.51639121454548, 192.51665931636364, 192.5166727214546, 192.51682017745458, 192.51691401309094, 192.5169408232728, 192.5170212538182, 192.51738319127276, 196.61105070545463, 196.61139923781818, 196.61150647854546, 196.61165393454547, 196.61174777018184, 196.61177458036363, 196.61180139054545, 196.61188182109098, 196.61205608727275, 196.61265931636365, 196.6127397469091, 196.61275315200004, 196.61312849454546, 196.6132625454546, 196.6132759505455, 196.61355745745456, 200.706688768, 
200.70747966836362, 200.70776117527276, 200.70805608727278, 200.70825716363638, 200.70835099927274, 200.70839121454546, 200.70841802472728, 200.70845824000003, 200.70877996218184, 200.7088603927273, 204.8031177309091, 204.8032383767273, 204.80337242763636, 204.80361371927273, 204.80374777018184, 204.80380139054552, 204.80382820072728, 204.8038550109091, 204.80385501090913, 204.80388182109095, 204.80397565672732, 204.80432418909092, 204.80441802472734, 204.80467272145458, 208.89906411054545, 208.89953328872724, 208.89978798545457, 208.8999622516364, 208.90006949236366, 208.90008289745455, 208.9001097076364, 208.90021694836366, 208.90023035345456, 208.90028397381818, 208.90059229090912, 208.9006191010909, 208.90063250618178, 208.9006861265455, 208.90082017745462, 208.90108827927278, 208.9010882792728, 212.9953724276364, 212.995426048, 212.9954528581818, 212.99546626327276, 212.99547966836366, 212.99553328872727, 212.99560031418187, 212.9957075549091, 212.99586841600004, 212.99592203636365, 212.99594884654547, 212.99608289745458, 212.99619013818184, 212.99637780945457, 212.99644483490908, 212.9965654807273, 212.9966325061818, 212.99665931636366, 212.99667272145456, 212.9968603927273, 217.09103730036367, 217.09126518690908, 217.09137242763637, 217.09137242763643, 217.09177458036365, 217.09190863127276, 217.09235099927278, 217.09256548072725, 217.09260569600002, 217.09315530472733, 221.18719816145457, 221.18753328872728, 221.18762712436364, 221.187868416, 221.18875315200003, 225.2832517818182, 225.2832651869091, 225.2834930734546, 225.2838147956364, 225.2839622516364, 225.28398906181826, 225.28409630254544, 225.28447164509092, 225.28499444363638, 225.28519552, 229.37941264290913, 229.37997565672728, 229.38016332799998, 229.38068612654547, 233.4751713512727, 233.47522497163638, 233.47557350400007, 233.47560031418183, 233.47568074472724, 233.4756941498182, 233.47569414981822, 233.47570755490912, 233.47632418909095, 233.47680677236363, 237.57173436509095, 237.57219013818178, 241.6672115665455, 241.66820354327277, 241.66821694836364, 245.7648067723636, 253.95577458036362, 253.95586841600004, 253.95592203636363, 262.14796225163633, 262.14831078399993, 270.3395466938182, 270.3400024669091, 278.5319488465455, 294.91758426763636, 294.9177719389091]\navgArr9 = [98.3116754618182, 98.31239933672728, 98.31245295709091, 98.31306959127274, 98.3132840727273, 98.31380687127273, 98.31526802618184, 102.40797037381819, 102.40818485527275, 102.4082384756364, 102.40857360290913, 102.40865403345452, 102.40890873018184, 102.40900256581818, 102.40921704727272, 102.40955217454547, 102.40991411200002, 102.41061117672729, 106.50358162618184, 106.50363524654547, 106.50387653818181, 106.50401058909094, 106.50414464, 106.50415804509095, 106.50418485527275, 106.50427869090912, 106.5044529570909, 106.5046942487273, 106.50484170472726, 106.5048685149091, 106.50506959127273, 106.5051500218182, 106.5051500218182, 106.505471744, 106.5059007069091, 106.50614199854546, 110.59895158690911, 110.59931352436364, 110.59942076509091, 110.59982291781819, 110.5998229178182, 110.60015804509094, 110.60022507054548, 110.60046636218183, 110.6006004130909, 110.60069424872727, 110.60077467927273, 110.60081489454544, 110.60104278109091, 110.60112321163636, 110.60117683200001, 110.60182027636365, 114.69509904290909, 114.6951928785455, 114.69534033454548, 114.69551460072731, 114.69558162618186, 114.69562184145458, 114.69572908218183, 114.6958095127273, 114.6958095127273, 114.69584972800001, 114.69618485527273, 114.69621166545456, 
114.69627869090911, 114.69629209600002, 114.69630550109093, 114.6964261469091, 114.69647976727273, 114.69653338763638, 114.69674786909093, 114.69676127418182, 114.69696235054548, 114.69698916072728, 114.69704278109093, 114.69710980654547, 114.69728407272731, 114.69795432727274, 114.69930824145455, 118.79045559854546, 118.79073710545455, 118.79081753600002, 118.79115266327275, 118.79127330909094, 118.79131352436364, 118.79135373963636, 118.79142076509093, 118.79148779054547, 118.79166205672729, 118.7918229178182, 118.79194356363638, 118.79201058909092, 118.79201058909092, 118.79206420945455, 118.79226528581822, 118.79258700800001, 118.79265403345457, 118.79268084363639, 118.79277467927274, 118.79278808436364, 118.79278808436364, 118.79292213527275, 118.79293554036366, 118.79294894545455, 118.79296235054545, 118.79329747781821, 118.79341812363637, 122.88664326981821, 122.88673710545454, 122.88679072581822, 122.88680413090911, 122.88734033454548, 122.88735373963638, 122.88738054981819, 122.8875280058182, 122.88770227200004, 122.88778270254548, 122.88778270254548, 122.88779610763639, 122.88790334836364, 122.88793015854547, 122.88801058909094, 122.88813123490908, 122.88819826036367, 122.8882786909091, 122.88833231127273, 122.88843955200002, 122.88845295709096, 122.8885736029091, 122.88901597090911, 122.88917683200002, 122.88919023709096, 122.889766656, 122.89018221381819, 122.8906379869091, 126.98262986472727, 126.98273710545455, 126.9830588276364, 126.98330011927276, 126.98342076509093, 126.98363524654548, 126.98367546181821, 126.98370227200003, 126.98372908218184, 126.98372908218184, 126.98375589236365, 126.9837961076364, 126.98380951272729, 126.98388994327273, 126.9839435636364, 126.98402399418183, 126.98406420945456, 126.98409101963642, 126.98421166545457, 126.98433231127275, 126.98434571636366, 126.98438593163637, 126.98456019781821, 126.98460041309092, 126.98476127418185, 126.9848417047273, 126.9848551098182, 126.98501597090907, 126.985029376, 126.98502937600001, 126.9856594152727, 131.07734297600004, 131.07798642036366, 131.07832154763636, 131.07904542254545, 131.07905882763637, 131.07928671418185, 131.07930011927274, 131.07939395490908, 131.0794207650909, 131.07948779054547, 131.0795011956364, 131.0795146007273, 131.079554816, 131.07960843636366, 131.0796486516364, 131.07975589236366, 131.07975589236366, 131.0798363229091, 131.0798363229091, 131.08006420945455, 131.0801312349091, 131.08025188072727, 131.08025188072733, 131.0804127418182, 131.080439552, 131.08043955200003, 131.08049317236365, 131.08066743854545, 131.08072105890912, 131.08081489454548, 131.08093554036367, 131.08096235054546, 131.08096235054546, 131.0810159709091, 131.08106959127275, 131.081176832, 131.08121704727273, 135.1737049134546, 135.1743483578182, 135.1743483578182, 135.17440197818183, 135.17442878836366, 135.17450921890915, 135.17484434618183, 135.1749381818182, 135.1751928785455, 135.1751928785455, 135.17534033454547, 135.17538054981821, 135.17539395490908, 135.17539395490908, 135.1755146007273, 135.17555481600002, 135.1756888669091, 135.17575589236364, 135.17576929745456, 135.17576929745456, 135.17586313309093, 135.17588994327275, 135.1759435636364, 135.17606420945458, 135.17617145018184, 135.17617145018184, 135.17625188072728, 135.17630550109092, 135.176439552, 135.17646636218183, 135.17647976727275, 135.1765467927273, 135.17657360290912, 135.17662722327276, 135.1766942487273, 135.1766942487273, 135.17672105890912, 135.1768417047273, 135.1774449338182, 135.1775789847273, 135.1777264407273, 139.26997301527274, 
139.2702009018182, 139.27032154763637, 139.2704287883637, 139.27044219345458, 139.2706566749091, 139.2707505105455, 139.2707639156364, 139.27085775127273, 139.27095158690912, 139.27100520727276, 139.2710722327273, 139.27115266327277, 139.2712062836364, 139.271259904, 139.27125990400003, 139.27143417018186, 139.2715011956364, 139.27152800581823, 139.2715414109091, 139.27154141090912, 139.27160843636364, 139.2717961076364, 139.2718229178182, 139.271849728, 139.27187653818183, 139.27205080436366, 139.2723859316364, 139.27249317236365, 139.27250657745452, 139.27253338763637, 139.2725601978182, 139.2726942487273, 139.27380687127277, 143.36626792727273, 143.36628133236366, 143.36630814254548, 143.36642878836363, 143.3666566749091, 143.36671029527275, 143.36676391563637, 143.36679072581822, 143.366817536, 143.36688456145455, 143.36712585309093, 143.36715266327275, 143.3671928785455, 143.3672196887273, 143.36725990400004, 143.36732692945455, 143.36744757527276, 143.3675146007273, 143.36754141090913, 143.36755481600002, 143.36775589236365, 143.36786313309094, 143.367997184, 143.36807761454548, 143.36809101963638, 143.36810442472728, 143.3681178298182, 143.36825188072729, 143.3682786909091, 143.36854679272727, 143.36860041309095, 143.3687076538182, 143.36888192000004, 143.3690695912728, 143.3695923898182, 143.369914112, 143.37015540363637, 143.37026264436363, 143.37114738036362, 143.371536128, 147.46111508945455, 147.4617585338182, 147.46195961018185, 147.4620132305455, 147.46232154763638, 147.46317947345457, 147.46344757527277, 147.46352800581818, 147.46356822109092, 147.4636888669091, 147.46374248727273, 147.46390334836363, 147.46402399418184, 147.46403739927274, 147.46457360290913, 147.46477467927275, 147.46481489454547, 147.4648417047273, 147.46506959127274, 147.46510980654546, 147.46536450327275, 147.46549855418186, 151.55801323054547, 151.55820090181817, 151.55821430690912, 151.55850921890908, 151.55887115636364, 151.5588979665455, 151.55920628363637, 151.55931352436366, 151.55938054981823, 151.55940736000005, 151.5596486516364, 151.55970227200004, 151.5598229178182, 151.55984972799996, 151.55993015854548, 151.5599569687273, 151.56005080436367, 151.56015804509093, 151.56018485527275, 151.56047976727274, 151.56051998254551, 151.56053338763638, 151.56061381818188, 151.56065403345457, 151.5606540334546, 151.5607210589091, 151.56088192, 151.56102937600005, 151.56113661672728, 151.56216880872728, 155.65473710545456, 155.65485775127274, 155.65487115636364, 155.65499180218183, 155.65513925818186, 155.65523309381817, 155.65530011927274, 155.6555682210909, 155.65559503127272, 155.6556486516364, 155.65567546181822, 155.65594356363638, 155.65602399418182, 155.65642614690913, 155.6564797672727, 155.6565333876364, 155.6565736029091, 155.65705618618185, 155.65830285963636, 155.65855755636366, 159.75010706618184, 159.75045559854547, 159.7505360290909, 159.75053602909094, 159.75077732072725, 159.7508041309091, 159.75083094109092, 159.75087115636367, 159.7508979665455, 159.7509247767273, 159.75099180218183, 159.75113925818184, 159.75120628363638, 159.75125990400002, 159.75144757527272, 159.75146098036367, 159.7515146007273, 159.751702272, 159.7517692974546, 159.75180951272728, 159.7518095127273, 159.75197037381818, 159.75229209600002, 159.75233231127274, 159.75237252654546, 159.75241274181818, 159.7525601978182, 159.752587008, 159.75379346618186, 159.75423583418186, 163.84610706618184, 163.84650921890912, 163.8478631330909, 163.84802399418186, 163.8482652858182, 163.84854679272732, 163.84858700800004, 
163.8487210589091, 163.8489623505455, 163.84936450327274, 163.84952536436364, 167.94301861236366, 167.94304542254548, 167.94328671418185, 167.94335373963636, 167.9439569687273, 167.94441274181824, 167.9450025658182, 167.94539131345456, 167.94551195927278, 167.94614199854547, 172.0382143069091, 172.03826792727276, 172.03829473745455, 172.0382947374546, 172.03872370036368, 172.03911244800003, 172.03915266327277, 172.03988994327273, 172.04060041309094, 172.04082829963633, 172.04136450327275, 172.0415387694546, 172.04194092218185, 172.04212859345458, 176.1346164596364, 176.13472370036368, 176.13480413090915, 176.13487115636366, 176.1356352465455, 176.13575589236362, 176.13672105890913, 176.13719023709095, 176.13733769309096, 176.13736450327278, 176.13811518836366, 176.1384101003637, 180.2304153832727, 180.2316620567273, 180.23226528581822, 180.2327076538182, 180.23384708654547, 184.32801058909092, 184.32830550109094, 184.32831890618183, 184.32937790836368, 184.32983368145455, 184.33020902400003, 188.4213161658182, 188.4232062836364, 188.4238363229091, 188.42451998254546, 188.4257532509091, 188.42626264436365, 188.4262626443637, 192.51880413090913, 192.52050657745457, 192.52173984581816, 196.61442878836365, 196.61572908218182, 196.61631890618182, 200.71242614690908, 200.71254679272727, 204.80731352436362, 204.80740735999998, 204.80917683200002, 204.8092974778182, 208.90234835781823, 212.99833495272725, 213.00080148945457]\n\na0 = np.array(avgArr0)\na1 = np.array(avgArr1)\na3 = np.array(avgArr3)\na5 = np.array(avgArr5)\na7 = np.array(avgArr7)\na9 = np.array(avgArr9)\n\nsns.set(style=\"white\", palette=\"muted\", color_codes=True)\n\nf, axes = plt.subplots(figsize=(10, 10))\n\naxlabel = 'Среднее время, мс'\n\nsns.distplot(a0, color=\"#4285f4\", ax=axes, axlabel=axlabel, label='0')\nsns.distplot(a1, color=\"#ea4335\", ax=axes, axlabel=axlabel, label='0.1')\nsns.distplot(a3, color=\"#fbbc04\", ax=axes, axlabel=axlabel, label='0.3')\nsns.distplot(a5, color=\"#34a853\", ax=axes, axlabel=axlabel, label='0.5')\nsns.distplot(a7, color=\"#ff6d01\", ax=axes, axlabel=axlabel, label='0.7')\nsns.distplot(a9, color=\"#46bdc6\", ax=axes, axlabel=axlabel, label='0.9')\n\naxes.legend()\n\nplt.setp(axes)\nplt.tight_layout()\nplt.show()",
"import numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\navgArr = [684.0322959592726, 884.7363009861817, 888.8322884189091, 942.080300986182, 970.7522934458182, 991.2322959592727, 991.2323009861818, 1011.712300986182, 1036.288295959273, 1044.4803034996364, 1060.8642859054546, 1069.056306013091, 1077.2482984727276, 1122.304306013091, 1130.4963034996365, 1134.5923034996363, 1142.7843009861817, 1146.8802884189092, 1159.1682984727274, 1163.2643085265454, 1224.7042884189093, 1232.8963110400005, 1249.280295959273, 1261.5682934458184, 1265.6642833920002, 1282.0483060130912, 1290.2402984727278, 1298.4323110400003, 1331.2003034996367, 1351.680290932364, 1363.968290932364, 1363.9682959592733, 1409.0243009861824, 1409.024303499637, 1413.1202984727277, 1413.120306013091, 1429.5042884189097, 1429.5043060130913, 1470.464293445819, 1495.0402959592734, 1527.808285905455, 1556.4803034996369, 1593.3442934458185, 1613.8243009861826, 1634.3042859054556, 1638.400285905455, 1650.6882884189097, 1662.9763009861827, 1699.8403085265466, 1740.800303499637, 1757.1842959592736, 1757.1843009861827, 1761.2803160669096, 1781.7602909323643, 1794.0482959592732, 1794.0483034996373, 1802.2402909323644, 1810.432298472728, 1822.7202859054553, 1830.9122959592737, 1843.2003034996371, 1855.4882959592733, 1863.680295959274, 1867.7763009861828, 1892.3522909323647, 1908.736311040001, 1929.21628841891, 1982.4643135534557, 2002.944295959274, 2048.0002984727284, 2064.3842859054553, 2158.5922934458195, 2207.7442934458195, 2228.2242833920013, 2240.512300986183, 2269.1842884189105, 2285.568283392001, 2338.8162984727287, 2351.104295959274, 2416.6402984727292, 2424.832290932365, 2461.696298472729, 2478.080295959274, 2568.1922934458194, 2654.20829344582, 2715.648303499638, 2756.6083009861827, 2801.66429344582, 2936.8322909323656, 2990.0803160669107, 3043.3282984727293, 3117.0563210938194, 3125.2482959592744, 3170.304298472728, 3280.896278365093, 3379.200290932365, 3506.176295959273, 3850.240300986182, 3887.104285905455, 5201.920300986177]\nmaxArr = [1331.2002984727274, 1638.4003613090913, 1740.800235636364, 1740.8002984727277, 1740.8003613090914, 1740.8003613090914, 1843.2002356363641, 1843.2002984727278, 1843.2003613090915, 1843.2003613090915, 1945.600298472728, 1945.600298472728, 1945.600298472728, 1945.6003613090916, 2048.0002984727284, 2048.0002984727284, 2048.0002984727284, 2048.000361309092, 2048.000361309092, 2048.000361309092, 2150.400235636365, 2150.400235636365, 2150.4002984727285, 2150.400361309092, 2150.400361309092, 2150.400361309092, 2150.400361309092, 2252.8003613090923, 2252.8003613090923, 2355.2003613090924, 2355.2003613090924, 2355.2003613090924, 2355.2003613090924, 2355.2003613090924, 2457.600298472729, 2457.6003613090925, 2457.6003613090925, 2457.6003613090925, 2457.6003613090925, 2457.6003613090925, 2560.000298472729, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2560.0003613090926, 2662.4002356363653, 2662.400298472729, 2662.400298472729, 2662.4003613090927, 2662.4003613090927, 2764.800298472729, 2764.800298472729, 2764.8003613090928, 2764.8003613090928, 2867.200298472729, 2867.200361309093, 2867.200361309093, 2867.200361309093, 2969.600361309093, 3072.0002984727294, 3072.0002984727294, 3072.0002984727294, 3072.0002984727294, 3072.0002984727294, 3174.4002984727294, 3174.4002984727294, 3174.400361309093, 3174.400361309093, 3174.400361309093, 3276.800361309093, 3276.800361309093, 3379.200235636366, 3379.2003613090933, 
3481.6002984727297, 3481.6002984727297, 3481.6003613090934, 3481.6003613090934, 3686.4002356363662, 3686.4003613090936, 3686.4003613090936, 3788.80029847273, 3788.8003613090937, 3993.60029847273, 3993.60029847273, 3993.600361309094, 3993.600361309094, 4096.000361309092, 4096.000361309092, 4198.400298472728, 4198.400361309092, 4198.400361309092, 4300.800361309091, 4608.000235636363, 4608.00036130909, 5017.6002356363615, 5017.600361309089, 5120.000235636361, 7168.000361309081]\n\na = np.array(avgArr)\nm = np.array(maxArr)\n\nx1 = pd.Series(a, name=\"Среднее время, мс\")\nx2 = pd.Series(m, name=\"Максимальное время, мс\")\n\nsns.jointplot(x1, x2, kind=\"hex\")\n\nplt.tight_layout()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.setp",
"numpy.array",
"matplotlib.pyplot.show"
],
[
"matplotlib.pyplot.tight_layout",
"numpy.array",
"pandas.Series",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
dliu5812/PDAM | [
"09a5511e6bd10158ea06771c63be3150982b9fe3"
] | [
"maskrcnn_benchmark/modeling/domain_adaption/LabelResizeLayer.py"
] | [
"#\n# from __future__ import absolute_import\n# from __future__ import division\n# from __future__ import print_function\nimport random\nimport torch\nimport torch.nn.functional as F\nimport torchvision.models as models\nfrom torch.autograd import Variable\nimport numpy as np\nimport torch.nn as nn\nfrom torch.autograd import Function\nimport cv2\n\n\nclass ImageLabelResizeLayer(nn.Module):\n \"\"\"\n Resize label to be the same size with the samples\n \"\"\"\n def __init__(self):\n super(ImageLabelResizeLayer, self).__init__()\n\n\n def forward(self,x, need_backprop):\n\n feats = x.detach().cpu().numpy()\n lbs = need_backprop.detach().cpu().numpy()\n gt_blob = np.zeros((lbs.shape[0], feats.shape[2], feats.shape[3], 1), dtype=np.float32)\n for i in range(lbs.shape[0]):\n lb=np.array([lbs[i]])\n lbs_resize = cv2.resize(lb, (feats.shape[3] ,feats.shape[2]), interpolation=cv2.INTER_NEAREST)\n gt_blob[i, 0:lbs_resize.shape[0], 0:lbs_resize.shape[1], 0] = lbs_resize\n\n channel_swap = (0, 3, 1, 2)\n gt_blob = gt_blob.transpose(channel_swap)\n y=Variable(torch.from_numpy(gt_blob)).cuda()\n y=y.squeeze(1).long()\n return y\n\n\nclass InstanceLabelResizeLayer(nn.Module):\n\n\n def __init__(self):\n super(InstanceLabelResizeLayer, self).__init__()\n self.minibatch=256\n\n def forward(self, x,need_backprop):\n feats = x.data.cpu().numpy()\n lbs = need_backprop.data.cpu().numpy()\n\n resized_lbs = np.ones((feats.shape[0], 1), dtype=np.float32)\n for i in range(lbs.shape[0]):\n resized_lbs[i*self.minibatch:(i+1)*self.minibatch] = lbs[i]\n\n y=torch.from_numpy(resized_lbs).cuda()\n\n return y\n\nclass FcLabelResizeLayer(nn.Module):\n\n\n def __init__(self):\n super(FcLabelResizeLayer, self).__init__()\n self.minibatch = 1\n\n def forward(self, x,need_backprop):\n feats = x.data.cpu().numpy()\n lbs = need_backprop.data.cpu().numpy()\n\n resized_lbs = np.ones((feats.shape[0], 1), dtype=np.float32)\n for i in range(lbs.shape[0]):\n resized_lbs[i*self.minibatch:(i+1)*self.minibatch] = lbs[i]\n\n y=torch.from_numpy(resized_lbs).cuda().long()\n\n return y"
] | [
[
"numpy.array",
"numpy.zeros",
"torch.from_numpy",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Burntt/MastersMachineLearning | [
"9c8896b4dfe46ee02bc5fdbca47acffbeca6828e"
] | [
"day10_Optimization_and_Regularization_in_DL/notmnist.py"
] | [
"import os\nfrom glob import glob\n\nimport numpy as np\nfrom matplotlib.pyplot import imread\nfrom sklearn.model_selection import train_test_split\n\n\ndef load_notmnist(\n path=\"./notMNIST_small\", letters=\"ABCDEFGHIJ\", img_shape=(28, 28), test_size=0.25, one_hot=False\n):\n\n # download data if it's missing. If you have any problems, go to the urls and load it manually.\n if not os.path.exists(path):\n if not os.path.exists(\"./notMNIST_small.tar.gz\"):\n print(\"Downloading data...\")\n assert (\n os.system(\n \"curl http://yaroslavvb.com/upload/notMNIST/notMNIST_small.tar.gz > notMNIST_small.tar.gz\"\n )\n == 0\n )\n print(\"Extracting ...\")\n assert os.system(\"tar -zxvf notMNIST_small.tar.gz > untar_notmnist.log\") == 0\n\n data, labels = [], []\n print(\"Parsing...\")\n for img_path in glob(os.path.join(path, \"*/*\")):\n class_i = img_path.split(os.sep)[-2]\n if class_i not in letters:\n continue\n try:\n data.append(imread(img_path))\n labels.append(\n class_i,\n )\n except BaseException:\n print(\"found broken img: %s [it's ok if <10 images are broken]\" % img_path)\n\n data = np.stack(data)[:, None].astype(\"float32\")\n data = (data - np.mean(data)) / np.std(data)\n\n # convert classes to ints\n letter_to_i = {l: i for i, l in enumerate(letters)}\n labels = np.array(list(map(letter_to_i.get, labels)))\n\n if one_hot:\n labels = (np.arange(np.max(labels) + 1)[None, :] == labels[:, None]).astype(\"float32\")\n\n # split into train/test\n X_train, X_test, y_train, y_test = train_test_split(\n data, labels, test_size=test_size, random_state=42\n )\n\n print(\"Done\")\n return X_train, y_train, X_test, y_test\n"
] | [
[
"matplotlib.pyplot.imread",
"sklearn.model_selection.train_test_split",
"numpy.stack",
"numpy.max",
"numpy.std",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pedro-abreu/twostream-attention | [
"60a47c50b8f2427911e5e30fd6c6f933dbf08a4e",
"60a47c50b8f2427911e5e30fd6c6f933dbf08a4e"
] | [
"code/code_AVA/extra/pose_test.py",
"code/code_AVA/rgb.py"
] | [
"import tensorflow as tf\nimport utils\nimport voting\nfrom pose_model import pose_create_model, compile_model\nfrom pose_data import load_split, get_AVA_set\nimport time\nfrom keras import backend as K\nimport numpy as np\nimport pickle\n\n\ndef main():\n\n root_dir = '../../data/AVA/files/'\n\n # Load list of action classes and separate them (from utils_stream)\n classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')\n\n # Parameters for training (batch size 32 is supposed to be the best?)\n # Parameters for training\n params = {'dim': (300, 300), 'batch_size': 32,\n 'n_classes': len(classes['label_id']), 'n_channels': 3,\n 'nb_epochs': 200, 'model': 'alexnet', 'email': True, 'train_chunk_size': 2**12,\n 'validation_chunk_size': 2**12}\n soft_sigmoid = True\n store_predictions = True\n minValLoss = 9999990.0\n split = \"test\"\n\n # Get validation set from directory\n partition = {}\n partition['test'] = get_AVA_set(classes=classes, filename=root_dir + \"AVA_\" + split.title() + \"_Custom_Corrected.csv\", soft_sigmoid=True)\n\n time_str = time.strftime(\"%y%m%d%H%M\", time.localtime())\n result_csv = \"output_test_pose_\" + time_str + \".csv\"\n\n # Load trained model\n pose_weights = \"../models/pose_alexnet_1808310209.hdf5\"\n model = pose_create_model(classes=classes['label_id'], soft_sigmoid=soft_sigmoid, image_shape=params['dim'], model_name=params['model'])\n model = compile_model(model, soft_sigmoid=soft_sigmoid)\n model.load_weights(pose_weights)\n\n print(\"Test set size: \" + str(len(partition['test'])))\n\n # Load chunks\n test_splits = utils.make_chunks(original_list=partition['test'], size=len(partition['test']), chunk_size=2**11)\n\n # Test directories where pre-processed test files are\n pose_dir = \"/media/pedro/actv-ssd/pose_\" + split + \"/\"\n\n test_chunks_count = 0\n\n pose_votes = {}\n obj_votes = {}\n human_votes = {}\n\n for row in partition['test']:\n row = row.split(\"@\")\n i = row[0] + \"@\" + row[1] + \"@\" + str(row[2]) + \"@\" + str(row[3]) + \"@\" + str(row[4]) + \"@\" + str(row[5])\n pose_votes[i] = np.zeros(utils.POSE_CLASSES)\n obj_votes[i] = np.zeros(utils.OBJ_HUMAN_CLASSES)\n human_votes[i] = np.zeros(utils.HUMAN_HUMAN_CLASSES)\n\n test_predictions = []\n with tf.device('/gpu:0'):\n for testIDS in test_splits:\n # TODO Technically it shouldnt return labels here (these are ground truth)\n x_test_pose, y_test_pose, y_test_object, y_test_human = load_split(testIDS, None, params['dim'], params['n_channels'], split, filter_type, soft_sigmoid=True, train=False)\n print(\"Predicting on chunk \" + str(test_chunks_count) + \"/\" + str(len(test_splits)))\n\n predictions = model.predict(x_test_pose, batch_size=params['batch_size'], verbose=1)\n if store_predictions is True:\n # print(predictions[0][0])\n # print(predictions[1][0])\n # print(predictions[2][0])\n\n # tarr = np.hstack((np.vstack(predictions[0]), np.vstack(predictions[1]), np.vstack(predictions[2])))\n test_predictions.append(predictions)\n\n # Convert predictions to readable output and perform majority voting\n voting.pred2classes(testIDS, predictions, pose_votes, obj_votes, human_votes, thresh=0.4)\n test_chunks_count += 1\n\n if store_predictions is True:\n #tp = np.vstack(test_predictions)\n # print(tp.shape)\n with open(\"thresholds/pose/predictions_pose_\" + time_str + \".pickle\", 'wb') as handle:\n pickle.dump(test_predictions, handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n # When you're done getting all the votes, write output csv\n with open(result_csv, \"a\") as 
output_file:\n for key in pose_votes:\n idx = key.split(\"@\")\n actions = []\n pv = pose_votes[key]\n pose_vote = pv.argmax(axis=0) + 1\n actions.append(pose_vote)\n\n # Get 3 top voted object\n ov = obj_votes[key]\n top_three_obj_votes = ov.argsort()[-3:][::-1] + utils.POSE_CLASSES + 1\n for t in top_three_obj_votes:\n if t != 0: # Often there might only be two top voted or one\n actions.append(t)\n # Get 3 top voted human\n hv = human_votes[key]\n top_three_human_votes = hv.argsort()[-3:][::-1] + utils.POSE_CLASSES + utils.OBJ_HUMAN_CLASSES + 1\n for t in top_three_human_votes:\n if t != 0: # Often there might only be two top voted or one\n actions.append(t)\n\n video_name = idx[0]\n timestamp = idx[1]\n bb_topx = idx[2]\n bb_topy = idx[3]\n bb_botx = idx[4]\n bb_boty = idx[5]\n for a in actions:\n line = video_name + \",\" + timestamp + \",\" + bb_topx + \",\" + bb_topy + \",\" + bb_botx + \",\" + bb_boty + \",\" + str(a)\n output_file.write(\"%s\\n\" % line)\n\n if params['email']:\n utils.sendemail(from_addr='[email protected]',\n to_addr_list=['[email protected]'],\n subject='Finished prediction for ' + filter_type,\n message='Testing pose with following params: ' + str(params),\n login='[email protected]',\n password='1!qwerty')\n\n\nif __name__ == '__main__':\n main()\n",
"import tensorflow as tf\n# from keras.utils import multi_gpu_model\nfrom keras.utils import to_categorical\nfrom keras import backend as K\nimport csv\nimport time\nimport timeit\n\nimport utils\nfrom rgb_model import rgb_create_model, compile_model\nfrom rgb_data import load_split, get_AVA_set, get_AVA_labels\n\n\ndef main():\n # root_dir = '../../../AVA2.1/' # root_dir for the files\n root_dir = '../../data/AVA/files/'\n\n # Erase previous models from GPU memory\n K.clear_session()\n\n # Load list of action classes and separate them\n classes = utils.get_AVA_classes(root_dir + 'ava_action_list_custom.csv')\n\n # Parameters for training\n params = {'dim': (224, 224), 'batch_size': 32,\n 'n_classes': len(classes['label_id']), 'n_channels': 3,\n 'shuffle': False, 'nb_epochs': 200, 'model': 'inceptionv3', 'email': True,\n 'freeze_all': True, 'conv_fusion': False, 'train_chunk_size': 2**12,\n 'validation_chunk_size': 2**12}\n soft_sigmoid = True\n minValLoss = 9999990.0\n\n # Get ID's and labels from the actual dataset\n partition = {}\n partition['train'] = get_AVA_set(classes=classes, filename=root_dir + \"AVA_Train_Custom_Corrected.csv\", soft_sigmoid=soft_sigmoid) # IDs for training\n partition['validation'] = get_AVA_set(classes=classes, filename=root_dir + \"AVA_Val_Custom_Corrected.csv\", soft_sigmoid=soft_sigmoid) # IDs for validation\n\n # Labels\n labels_train = get_AVA_labels(classes, partition, \"train\", filename=root_dir + \"AVA_Train_Custom_Corrected.csv\", soft_sigmoid=soft_sigmoid)\n labels_val = get_AVA_labels(classes, partition, \"validation\", filename=root_dir + \"AVA_Val_Custom_Corrected.csv\", soft_sigmoid=soft_sigmoid)\n\n # Create + compile model, load saved weights if they exist\n saved_weights = None\n # saved_weights = \"../models/rgbextra_gauss_resnet50_1807250030.hdf5\"\n model, keras_layer_names = rgb_create_model(classes=classes['label_id'], soft_sigmoid=soft_sigmoid, model_name=params['model'], freeze_all=params['freeze_all'], conv_fusion=params['conv_fusion'])\n model = compile_model(model, soft_sigmoid=soft_sigmoid)\n\n # TODO Experiment: 1. no initialization, 2. ucf initialization 3. 
kinetics initialization\n initialization = True # Set to True to use initialization\n kinetics_weights = None\n ucf_weights = \"a\"\n\n if saved_weights is not None:\n model.load_weights(saved_weights)\n else:\n if initialization is True:\n if ucf_weights is None:\n print(\"Loading MConvNet weights: \")\n if params['model'] == \"resnet50\":\n ucf_weights = utils.loadmat(\"../models/ucf_matconvnet/ucf101-img-resnet-50-split1.mat\")\n utils.convert_resnet(model, ucf_weights)\n model.save(\"../models/ucf_keras/keras-ucf101-rgb-resnet50-newsplit.hdf5\")\n if kinetics_weights is None:\n if params['model'] == \"inceptionv3\":\n print(\"Loading Keras weights: \")\n keras_weights = [\"../models/kinetics_keras/tsn_rgb_params_names.pkl\", \"../models/kinetics_keras/tsn_rgb_params.pkl\"]\n utils.convert_inceptionv3(model, keras_weights, keras_layer_names)\n model.save(\"../models/kinetics_keras/keras-kinetics-rgb-inceptionv3.hdf5\")\n # Try to train on more than 1 GPU if possible\n # try:\n # print(\"Trying MULTI-GPU\")\n # model = multi_gpu_model(model)\n\n print(\"Training set size: \" + str(len(partition['train'])))\n\n # Make spltis\n train_splits = utils.make_chunks(original_list=partition['train'], size=len(\n partition['train']), chunk_size=params['train_chunk_size'])\n val_splits = utils.make_chunks(original_list=partition['validation'], size=len(\n partition['validation']), chunk_size=params['validation_chunk_size'])\n\n time_str = time.strftime(\"%y%m%d%H%M\", time.localtime())\n\n # TODO Don't forget to change your names :)\n filter_type = \"gauss\"\n bestModelPath = \"../models/rgb_kininit_\" + filter_type + \\\n \"_\" + params['model'] + \"_\" + time_str + \".hdf5\"\n traincsvPath = \"../loss_acc_plots/rgb_kininit_train_\" + filter_type + \\\n \"_plot_\" + params['model'] + \"_\" + time_str + \".csv\"\n valcsvPath = \"../loss_acc_plots/rgb_kininit_val_\" + filter_type + \\\n \"_plot_\" + params['model'] + \"_\" + time_str + \".csv\"\n\n with tf.device('/gpu:0'): # NOTE Not using multi gpu\n for epoch in range(params['nb_epochs']):\n epoch_chunks_count = 0\n for trainIDS in train_splits:\n # Load and train\n start_time = timeit.default_timer()\n # -----------------------------------------------------------\n x_val = y_val_pose = y_val_object = y_val_human = x_train = y_train_pose = y_train_object = y_train_human = None\n x_train, y_train_pose, y_train_object, y_train_human = load_split(trainIDS, labels_train, params[\n 'dim'], params['n_channels'], \"train\", filter_type, soft_sigmoid=soft_sigmoid)\n\n y_t = []\n y_t.append(to_categorical(y_train_pose, num_classes=utils.POSE_CLASSES))\n y_t.append(utils.to_binary_vector(y_train_object, size=utils.OBJ_HUMAN_CLASSES, labeltype='object-human'))\n y_t.append(utils.to_binary_vector(y_train_human, size=utils.HUMAN_HUMAN_CLASSES, labeltype='human-human'))\n\n history = model.fit(x_train, y_t, batch_size=params[\n 'batch_size'], epochs=1, verbose=0)\n utils.learning_rate_schedule(model, epoch, params['nb_epochs'])\n\n # TODO Repeat samples of unrepresented classes?\n\n # ------------------------------------------------------------\n elapsed = timeit.default_timer() - start_time\n\n print(\"Epoch \" + str(epoch) + \" chunk \" + str(epoch_chunks_count) + \" (\" + str(elapsed) + \") acc[pose,obj,human] = [\" + str(history.history['pred_pose_categorical_accuracy']) + \",\" +\n str(history.history['pred_obj_human_categorical_accuracy']) + \",\" + str(history.history['pred_human_human_categorical_accuracy']) + \"] loss: \" + 
str(history.history['loss']))\n with open(traincsvPath, 'a') as f:\n writer = csv.writer(f)\n avg_acc = (history.history['pred_pose_categorical_accuracy'][0] + history.history['pred_obj_human_categorical_accuracy']\n [0] + history.history['pred_human_human_categorical_accuracy'][0]) / 3\n writer.writerow([str(avg_acc), history.history['pred_pose_categorical_accuracy'], history.history['pred_obj_human_categorical_accuracy'],\n history.history['pred_human_human_categorical_accuracy'], history.history['loss']])\n epoch_chunks_count += 1\n # Load val_data\n print(\"Validating data: \")\n # global_loss, pose_loss, object_loss, human_loss, pose_acc, object_acc, human_acc\n loss_acc_list = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]\n for valIDS in val_splits:\n x_val = y_val_pose = y_val_object = y_val_human = x_train = y_train_pose = y_train_object = y_train_human = None\n x_val, y_val_pose, y_val_object, y_val_human = load_split(valIDS, labels_val, params[\n 'dim'], params['n_channels'], \"val\", filter_type, soft_sigmoid=soft_sigmoid)\n y_v = []\n y_v.append(to_categorical(\n y_val_pose, num_classes=utils.POSE_CLASSES))\n y_v.append(utils.to_binary_vector(\n y_val_object, size=utils.OBJ_HUMAN_CLASSES, labeltype='object-human'))\n y_v.append(utils.to_binary_vector(\n y_val_human, size=utils.HUMAN_HUMAN_CLASSES, labeltype='human-human'))\n\n vglobal_loss, vpose_loss, vobject_loss, vhuman_loss, vpose_acc, vobject_acc, vhuman_acc = model.evaluate(\n x_val, y_v, batch_size=params['batch_size'])\n loss_acc_list[0] += vglobal_loss\n loss_acc_list[1] += vpose_loss\n loss_acc_list[2] += vobject_loss\n loss_acc_list[3] += vhuman_loss\n loss_acc_list[4] += vpose_acc\n loss_acc_list[5] += vobject_acc\n\n loss_acc_list[6] += vhuman_acc\n # Average over all validation chunks\n loss_acc_list = [x / len(val_splits) for x in loss_acc_list]\n with open(valcsvPath, 'a') as f:\n writer = csv.writer(f)\n # We consider accuracy as the average accuracy over the three\n # types of accuracy\n acc = (loss_acc_list[4] +\n loss_acc_list[5] + loss_acc_list[6]) / 3\n writer.writerow([str(acc), loss_acc_list[4], loss_acc_list[5], loss_acc_list[6],\n loss_acc_list[0], loss_acc_list[1], loss_acc_list[2], loss_acc_list[3]])\n if loss_acc_list[0] < minValLoss:\n print(\"New best loss \" + str(loss_acc_list[0]))\n model.save(bestModelPath)\n minValLoss = loss_acc_list[0]\n\n if params['email']:\n utils.sendemail(from_addr='[email protected]',\n to_addr_list=['[email protected]'],\n subject='Finished training RGB-stream ',\n message='Training RGB with following params: ' +\n str(params),\n login='[email protected]',\n password='1!qwerty')\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"tensorflow.device",
"numpy.zeros"
],
[
"tensorflow.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
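The pose test script above folds per-chunk predictions into per-box vote arrays and then reads out exactly one pose action plus up to three object-interaction and three human-interaction actions. A minimal sketch of that read-out, with made-up class counts standing in for utils.POSE_CLASSES, utils.OBJ_HUMAN_CLASSES and utils.HUMAN_HUMAN_CLASSES (the +1 offsets mirror the 1-based, contiguous label ids used there):

import numpy as np

# Stand-in class counts; the real values come from the project's utils module.
POSE_CLASSES, OBJ_HUMAN_CLASSES, HUMAN_HUMAN_CLASSES = 10, 12, 8

def votes_to_actions(pose_votes, obj_votes, human_votes):
    actions = []
    # Exactly one pose action: the arg-max over accumulated pose votes.
    actions.append(int(pose_votes.argmax()) + 1)
    # Up to three object-interaction ids, offset past the pose id range.
    for t in obj_votes.argsort()[-3:][::-1] + POSE_CLASSES + 1:
        actions.append(int(t))
    # Up to three human-interaction ids, offset past pose and object ranges.
    for t in human_votes.argsort()[-3:][::-1] + POSE_CLASSES + OBJ_HUMAN_CLASSES + 1:
        actions.append(int(t))
    return actions

rng = np.random.default_rng(0)
print(votes_to_actions(rng.random(POSE_CLASSES),
                       rng.random(OBJ_HUMAN_CLASSES),
                       rng.random(HUMAN_HUMAN_CLASSES)))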
greatwallet/cosypose | [
"e72ce7d521ef61870daef267cbbe65aaebe9d24d",
"e72ce7d521ef61870daef267cbbe65aaebe9d24d"
] | [
"evaluation/meters/detection_meters.py",
"multiview/bundle_adjustment.py"
] | [
"import numpy as np\r\nfrom sklearn.metrics import average_precision_score\r\nimport xarray as xr\r\nimport torchvision\r\nimport torch\r\nfrom torch.utils.data import TensorDataset, DataLoader\r\nfrom .base import Meter\r\n\r\nfrom .utils import (match_poses, get_top_n_ids,\r\n add_valid_gt, get_candidate_matches, add_inst_num)\r\nfrom cosypose.utils.xarray import xr_merge\r\n\r\n\r\nclass DetectionMeter(Meter):\r\n def __init__(self,\r\n iou_threshold=0.5,\r\n errors_bsz=512,\r\n consider_all_predictions=False,\r\n targets=None,\r\n visib_gt_min=-1,\r\n n_top=-1):\r\n\r\n self.iou_threshold = iou_threshold\r\n self.consider_all_predictions = consider_all_predictions\r\n self.targets = targets\r\n self.visib_gt_min = visib_gt_min\r\n self.errors_bsz = errors_bsz\r\n self.n_top = n_top\r\n self.reset()\r\n\r\n def compute_metrics(self, bbox_pred, bbox_gt):\r\n iou_all = torchvision.ops.box_iou(bbox_pred, bbox_gt)\r\n arange_n = torch.arange(len(bbox_pred))\r\n iou = iou_all[arange_n, arange_n]\r\n return dict(iou=iou)\r\n\r\n def compute_metrics_batch(self, bbox_pred, bbox_gt):\r\n metrics = []\r\n ids = torch.arange(len(bbox_pred))\r\n ds = TensorDataset(bbox_pred, bbox_gt, ids)\r\n dl = DataLoader(ds, batch_size=self.errors_bsz)\r\n\r\n for (bbox_pred_, bbox_gt_, ids_) in dl:\r\n metrics.append(self.compute_metrics(bbox_pred_, bbox_gt_))\r\n\r\n if len(metrics) == 0:\r\n metrics.append(dict(\r\n iou=torch.empty(0, dtype=torch.float),\r\n ))\r\n\r\n metricsd = dict()\r\n for k in metrics[0].keys():\r\n metricsd[k] = torch.cat([metrics_n[k] for metrics_n in metrics], dim=0)\r\n return metricsd\r\n\r\n def add(self, pred_data, gt_data):\r\n group_keys = ['scene_id', 'view_id', 'label']\r\n\r\n pred_data = pred_data.float()\r\n gt_data = gt_data.float()\r\n\r\n # Only keep predictions relevant to gt scene and images.\r\n gt_infos = gt_data.infos.loc[:, ['scene_id', 'view_id']].drop_duplicates().reset_index(drop=True)\r\n targets = self.targets\r\n if targets is not None:\r\n targets = gt_infos.merge(targets)\r\n pred_data.infos['batch_pred_id'] = np.arange(len(pred_data))\r\n keep_ids = gt_infos.merge(pred_data.infos)['batch_pred_id']\r\n pred_data = pred_data[keep_ids]\r\n\r\n # Add inst id to the dataframes\r\n pred_data.infos = add_inst_num(pred_data.infos, key='pred_inst_id', group_keys=group_keys)\r\n gt_data.infos = add_inst_num(gt_data.infos, key='gt_inst_id', group_keys=group_keys)\r\n\r\n # Filter predictions according to BOP evaluation.\r\n if not self.consider_all_predictions:\r\n ids_top_n_pred = get_top_n_ids(pred_data.infos,\r\n group_keys=group_keys, top_key='score',\r\n targets=targets, n_top=self.n_top)\r\n pred_data_filtered = pred_data.clone()[ids_top_n_pred]\r\n else:\r\n pred_data_filtered = pred_data.clone()\r\n\r\n # Compute valid targets according to BOP evaluation.\r\n gt_data.infos = add_valid_gt(gt_data.infos,\r\n group_keys=group_keys,\r\n targets=targets,\r\n visib_gt_min=self.visib_gt_min)\r\n\r\n # Compute tentative candidates\r\n cand_infos = get_candidate_matches(pred_data_filtered.infos, gt_data.infos,\r\n group_keys=group_keys,\r\n only_valids=True)\r\n pred_ids = cand_infos['pred_id'].values.tolist()\r\n gt_ids = cand_infos['gt_id'].values.tolist()\r\n cand_bbox_gt = gt_data.bboxes[gt_ids]\r\n cand_bbox_pred = pred_data_filtered.bboxes[pred_ids]\r\n\r\n # Compute metrics for tentative matches\r\n metrics = self.compute_metrics_batch(cand_bbox_pred, cand_bbox_gt)\r\n\r\n # Matches can only be candidates within thresholds\r\n cand_infos['iou'] = 
metrics['iou'].cpu().numpy()\r\n keep = cand_infos['iou'] >= self.iou_threshold\r\n cand_infos = cand_infos[keep].reset_index(drop=True)\r\n\r\n # Match predictions to ground truth detections\r\n cand_infos['error'] = - cand_infos['iou']\r\n matches = match_poses(cand_infos, group_keys=group_keys)\r\n\r\n # Save all informations in xarray datasets\r\n gt_keys = group_keys + ['gt_inst_id', 'valid'] + (['visib_fract'] if 'visib_fract' in gt_infos else [])\r\n gt = gt_data.infos.loc[:, gt_keys]\r\n preds = pred_data.infos.loc[:, group_keys + ['pred_inst_id', 'score']]\r\n matches = matches.loc[:, group_keys + ['pred_inst_id', 'gt_inst_id', 'cand_id']]\r\n\r\n gt = xr.Dataset(gt).rename({'dim_0': 'gt_id'})\r\n matches = xr.Dataset(matches).rename({'dim_0': 'match_id'})\r\n preds = xr.Dataset(preds).rename({'dim_0': 'pred_id'})\r\n\r\n ious = metrics['iou'].cpu().numpy()[matches['cand_id'].values]\r\n matches['iou'] = 'match_id', ious\r\n matches['iou_valid'] = 'match_id', ious >= self.iou_threshold\r\n\r\n fill_values = {\r\n 'iou': np.nan,\r\n 'iou_valid': False,\r\n 'score': np.nan,\r\n }\r\n matches = xr_merge(matches, preds, on=group_keys + ['pred_inst_id'],\r\n dim1='match_id', dim2='pred_id', fill_value=fill_values)\r\n gt = xr_merge(gt, matches, on=group_keys + ['gt_inst_id'],\r\n dim1='gt_id', dim2='match_id', fill_value=fill_values)\r\n\r\n preds_match_merge = xr_merge(preds, matches, on=group_keys+['pred_inst_id'],\r\n dim1='pred_id', dim2='match_id', fill_value=fill_values)\r\n preds['iou_valid'] = 'pred_id', preds_match_merge['iou_valid']\r\n\r\n self.datas['gt_df'].append(gt)\r\n self.datas['pred_df'].append(preds)\r\n self.datas['matches_df'].append(matches)\r\n\r\n def summary(self):\r\n gt_df = xr.concat(self.datas['gt_df'], dim='gt_id')\r\n matches_df = xr.concat(self.datas['matches_df'], dim='match_id')\r\n pred_df = xr.concat(self.datas['pred_df'], dim='pred_id')\r\n valid_df = gt_df.sel(gt_id=gt_df['valid'])\r\n\r\n # AP/mAP @ IoU < threshold\r\n valid_k = 'iou_valid'\r\n n_gts = dict()\r\n\r\n if self.n_top > 0:\r\n group_keys = ['scene_id', 'view_id', 'label']\r\n subdf = gt_df[[*group_keys, 'valid']].to_dataframe().groupby(group_keys).sum().reset_index()\r\n subdf['gt_count'] = np.minimum(self.n_top, subdf['valid'])\r\n for label, group in subdf.groupby('label'):\r\n n_gts[label] = group['gt_count'].sum()\r\n else:\r\n subdf = gt_df[['label', 'valid']].groupby('label').sum()\r\n for label in subdf['label'].values:\r\n n_gts[label] = subdf.sel(label=label)['valid'].item()\r\n\r\n ap_dfs = dict()\r\n\r\n def compute_ap(label_df, label_n_gt):\r\n label_df = label_df.sort_values('score', ascending=False).reset_index(drop=True)\r\n label_df['n_tp'] = np.cumsum(label_df[valid_k].values.astype(np.float))\r\n label_df['prec'] = label_df['n_tp'] / (np.arange(len(label_df)) + 1)\r\n label_df['recall'] = label_df['n_tp'] / label_n_gt\r\n y_true = label_df[valid_k]\r\n y_score = label_df['score']\r\n ap = average_precision_score(y_true, y_score) * y_true.sum() / label_n_gt\r\n label_df['AP'] = ap\r\n label_df['n_gt'] = label_n_gt\r\n return ap, label_df\r\n\r\n df = pred_df[['label', valid_k, 'score']].to_dataframe().set_index(['label'])\r\n for label, label_n_gt in n_gts.items():\r\n if df.index.contains(label):\r\n label_df = df.loc[[label]]\r\n if label_df[valid_k].sum() > 0:\r\n ap, label_df = compute_ap(label_df, label_n_gt)\r\n ap_dfs[label] = label_df\r\n\r\n if len(ap_dfs) > 0:\r\n mAP = np.mean([np.unique(ap_df['AP']).item() for ap_df in ap_dfs.values()])\r\n AP, 
ap_dfs['all'] = compute_ap(df.reset_index(), sum(list(n_gts.values())))\r\n else:\r\n AP, mAP = 0., 0.\r\n n_gt_valid = int(sum(list(n_gts.values())))\r\n\r\n summary = {\r\n 'n_gt': len(gt_df['gt_id']),\r\n 'n_gt_valid': n_gt_valid,\r\n 'n_pred': len(pred_df['pred_id']),\r\n 'n_matched': len(matches_df['match_id']),\r\n 'matched_gt_ratio': len(matches_df['match_id']) / n_gt_valid,\r\n 'pred_matched_ratio': len(pred_df['pred_id']) / max(len(matches_df['match_id']), 1),\r\n 'iou_valid_recall': valid_df['iou_valid'].sum('gt_id').item() / n_gt_valid,\r\n }\r\n\r\n summary.update({\r\n 'AP': AP,\r\n 'mAP': mAP,\r\n })\r\n\r\n dfs = dict(gt=gt_df, matches=matches_df, preds=pred_df, ap=ap_dfs)\r\n return summary, dfs\r\n",
"import numpy as np\r\nfrom collections import defaultdict\r\nimport torch\r\nimport pandas as pd\r\nfrom scipy.sparse import csr_matrix\r\nfrom scipy.sparse.csgraph import connected_components\r\n\r\nimport cosypose.utils.tensor_collection as tc\r\n\r\nfrom cosypose.lib3d.transform_ops import invert_T, compute_transform_from_pose9d\r\nfrom cosypose.lib3d.camera_geometry import project_points\r\nfrom cosypose.lib3d.symmetric_distances import symmetric_distance_reprojected\r\n\r\nfrom .ransac import make_obj_infos\r\n\r\n\r\nfrom cosypose.utils.logging import get_logger\r\nfrom cosypose.utils.timer import Timer\r\nlogger = get_logger(__name__)\r\n\r\n\r\ndef make_view_groups(pairs_TC1C2):\r\n views = pairs_TC1C2.infos.loc[:, ['view1', 'view2']].values.T\r\n views = np.unique(views.reshape(-1))\r\n view_df = pd.DataFrame(dict(view_id=views, view_local_id=np.arange(len(views))))\r\n view_to_id = view_df.set_index('view_id')\r\n view1 = view_to_id.loc[pairs_TC1C2.infos.loc[:, 'view1'], 'view_local_id'].values\r\n view2 = view_to_id.loc[pairs_TC1C2.infos.loc[:, 'view2'], 'view_local_id'].values\r\n data = np.ones(len(view1))\r\n n_views = len(views)\r\n graph = csr_matrix((data, (view1, view2)), shape=(n_views, n_views))\r\n n_components, ids = connected_components(graph, directed=True, connection='strong')\r\n view_df['view_group'] = ids\r\n view_df = view_df.drop(columns=['view_local_id'])\r\n return view_df\r\n\r\n\r\nclass SamplerError(Exception):\r\n pass\r\n\r\n\r\nclass MultiviewRefinement:\r\n def __init__(self, candidates, cameras, pairs_TC1C2, mesh_db):\r\n\r\n self.device, self.dtype = candidates.device, candidates.poses.dtype\r\n self.mesh_db = mesh_db\r\n cameras = cameras.to(self.device).to(self.dtype)\r\n pairs_TC1C2 = pairs_TC1C2.to(self.device).to(self.dtype)\r\n\r\n view_ids = np.unique(candidates.infos['view_id'])\r\n keep_ids = np.logical_and(\r\n np.isin(pairs_TC1C2.infos['view1'], view_ids),\r\n np.isin(pairs_TC1C2.infos['view2'], view_ids),\r\n )\r\n pairs_TC1C2 = pairs_TC1C2[np.where(keep_ids)[0]]\r\n\r\n keep_ids = np.where(np.isin(cameras.infos['view_id'], view_ids))[0]\r\n cameras = cameras[keep_ids]\r\n\r\n self.cam_infos = cameras.infos\r\n self.view_to_id = {view_id: n for n, view_id in enumerate(self.cam_infos['view_id'])}\r\n self.K = cameras.K\r\n self.n_views = len(self.cam_infos)\r\n\r\n self.obj_infos = make_obj_infos(candidates)\r\n self.obj_to_id = {obj_id: n for n, obj_id in enumerate(self.obj_infos['obj_id'])}\r\n self.obj_points = self.mesh_db.select(self.obj_infos['label'].values).points\r\n self.n_points = self.obj_points.shape[1]\r\n self.n_objects = len(self.obj_infos)\r\n\r\n self.cand = candidates\r\n self.cand_TCO = candidates.poses\r\n self.cand_labels = candidates.infos['label']\r\n self.cand_view_ids = [self.view_to_id[view_id] for view_id in candidates.infos['view_id']]\r\n self.cand_obj_ids = [self.obj_to_id[obj_id] for obj_id in candidates.infos['obj_id']]\r\n self.n_candidates = len(self.cand_TCO)\r\n self.visibility_matrix = self.make_visibility_matrix(self.cand_view_ids, self.cand_obj_ids)\r\n\r\n self.v2v1_TC2C1_map = {(self.view_to_id[v2], self.view_to_id[v1]): invert_T(TC1C2) for\r\n (v1, v2, TC1C2) in zip(pairs_TC1C2.infos['view1'],\r\n pairs_TC1C2.infos['view2'],\r\n pairs_TC1C2.TC1C2)}\r\n self.ov_TCO_cand_map = {(o, v): TCO for (o, v, TCO) in zip(self.cand_obj_ids,\r\n self.cand_view_ids,\r\n self.cand_TCO)}\r\n self.residuals_ids = self.make_residuals_ids()\r\n\r\n def make_visibility_matrix(self, cand_view_ids, cand_obj_ids):\r\n 
matrix = torch.zeros(self.n_objects, self.n_views, dtype=torch.int, device=self.device)\r\n matrix[cand_obj_ids, cand_view_ids] = 1\r\n return matrix\r\n\r\n def make_residuals_ids(self):\r\n cand_ids, obj_ids, view_ids, point_ids, xy_ids = [], [], [], [], []\r\n for cand_id in range(self.n_candidates):\r\n for point_id in range(self.n_points):\r\n for xy_id in range(2):\r\n cand_ids.append(cand_id)\r\n obj_ids.append(self.cand_obj_ids[cand_id])\r\n view_ids.append(self.cand_view_ids[cand_id])\r\n point_ids.append(point_id)\r\n xy_ids.append(xy_id)\r\n residuals_ids = dict(\r\n cand_id=cand_ids,\r\n obj_id=obj_ids,\r\n view_id=view_ids,\r\n point_id=point_ids,\r\n xy_id=xy_ids,\r\n )\r\n return residuals_ids\r\n\r\n def sample_initial_TWO_TWC(self, seed):\r\n TWO = torch.zeros(self.n_objects, 4, 4, dtype=self.dtype, device=self.device) * float('nan')\r\n TWC = torch.zeros(self.n_views, 4, 4, dtype=self.dtype, device=self.device) * float('nan')\r\n\r\n object_to_views = defaultdict(set)\r\n for v in range(self.n_views):\r\n for o in range(self.n_objects):\r\n if self.visibility_matrix[o, v]:\r\n object_to_views[o].add(v)\r\n\r\n np_random = np.random.RandomState(seed)\r\n views_ordered = np_random.permutation(np.arange(self.n_views))\r\n objects_ordered = np_random.permutation(np.arange(self.n_objects))\r\n\r\n w = views_ordered[0]\r\n TWC[w] = torch.eye(4, 4, device=self.device, dtype=self.dtype)\r\n views_initialized = {w, }\r\n views_to_initialize = set(np.arange(self.n_views)) - views_initialized\r\n\r\n n_pass = 20\r\n n = 0\r\n # Initialize views\r\n while len(views_to_initialize) > 0:\r\n for v1 in views_ordered:\r\n if v1 in views_to_initialize:\r\n for v2 in views_ordered:\r\n if v2 not in views_initialized:\r\n continue\r\n if (v2, v1) in self.v2v1_TC2C1_map:\r\n TC2C1 = self.v2v1_TC2C1_map[(v2, v1)]\r\n TWC2 = TWC[v2]\r\n TWC[v1] = TWC2 @ TC2C1\r\n views_to_initialize.remove(v1)\r\n views_initialized.add(v1)\r\n break\r\n n += 1\r\n if n >= n_pass:\r\n raise SamplerError('Cannot find an initialization')\r\n\r\n # Initialize objects\r\n for o in objects_ordered:\r\n for v in views_ordered:\r\n if v in object_to_views[o]:\r\n TWO[o] = TWC[v] @ self.ov_TCO_cand_map[(o, v)]\r\n break\r\n return TWO, TWC\r\n\r\n @staticmethod\r\n def extract_pose9d(T):\r\n T_9d = torch.cat((T[..., :3, :2].transpose(-1, -2).flatten(-2, -1), T[..., :3, -1]), dim=-1)\r\n return T_9d\r\n\r\n def align_TCO_cand(self, TWO_9d, TCW_9d):\r\n TWO = compute_transform_from_pose9d(TWO_9d)\r\n TCW = compute_transform_from_pose9d(TCW_9d)\r\n TCO = TCW[self.cand_view_ids] @ TWO[self.cand_obj_ids]\r\n\r\n dists, sym = symmetric_distance_reprojected(self.cand_TCO, TCO,\r\n self.K[self.cand_view_ids],\r\n self.cand_labels, self.mesh_db)\r\n TCO_cand_aligned = self.cand_TCO @ sym\r\n return dists, TCO_cand_aligned\r\n\r\n def forward_jacobian(self, TWO_9d, TCW_9d, residuals_threshold):\r\n _, TCO_cand_aligned = self.align_TCO_cand(TWO_9d, TCW_9d)\r\n\r\n # NOTE: This could be *much* faster by computing gradients manually, reducing number of operations.\r\n cand_ids, view_ids, obj_ids, point_ids, xy_ids = [\r\n self.residuals_ids[k] for k in ('cand_id', 'view_id', 'obj_id', 'point_id', 'xy_id')\r\n ]\r\n\r\n n_residuals = len(cand_ids) # Number of residuals\r\n arange_n = torch.arange(n_residuals)\r\n\r\n TCW_9d = TCW_9d.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()\r\n TWO_9d = TWO_9d.unsqueeze(0).repeat(n_residuals, 1, 1).requires_grad_()\r\n\r\n TWO = compute_transform_from_pose9d(TWO_9d)\r\n TCW = 
compute_transform_from_pose9d(TCW_9d)\r\n\r\n TWO_n = TWO[arange_n, obj_ids]\r\n TCW_n = TCW[arange_n, view_ids]\r\n TCO_n = TCW_n @ TWO_n\r\n K_n = self.K[view_ids]\r\n\r\n TCO_cand_n = TCO_cand_aligned[cand_ids]\r\n\r\n points_n = self.obj_points[obj_ids, point_ids].unsqueeze(1)\r\n\r\n TCO_points_n = project_points(points_n, K_n, TCO_n).squeeze(1)[arange_n, xy_ids]\r\n TCO_cand_points_n = project_points(points_n, K_n, TCO_cand_n).squeeze(1)[arange_n, xy_ids]\r\n\r\n y = TCO_cand_points_n\r\n yhat = TCO_points_n\r\n errors = y - yhat\r\n residuals = (errors ** 2)\r\n residuals = torch.min(residuals, torch.ones_like(residuals) * residuals_threshold)\r\n\r\n loss = residuals.mean()\r\n if torch.is_grad_enabled():\r\n yhat.sum().backward()\r\n\r\n return errors, loss, TWO_9d.grad, TCW_9d.grad\r\n\r\n def compute_lm_step(self, errors, J, lambd):\r\n errors = errors.view(errors.numel(), 1)\r\n A = J.t() @ J + lambd * self.idJ\r\n b = J.t() @ errors\r\n # Pinverse is faster on CPU.\r\n h = torch.pinverse(A.cpu()).cuda() @ b\r\n return h.flatten()\r\n\r\n def optimize_lm(self, TWO_9d, TCW_9d,\r\n optimize_cameras=True,\r\n n_iterations=50, residuals_threshold=25,\r\n lambd0=1e-3, L_down=9, L_up=11, eps=1e-5):\r\n # See http://people.duke.edu/~hpgavin/ce281/lm.pdf\r\n n_params_TWO = TWO_9d.numel()\r\n n_params_TCW = TCW_9d.numel()\r\n n_params = n_params_TWO + n_params_TCW\r\n self.idJ = torch.eye(n_params).to(self.device).to(self.dtype)\r\n\r\n prev_iter_is_update = False\r\n lambd = lambd0\r\n done = False\r\n history = defaultdict(list)\r\n for n in range(n_iterations):\r\n\r\n if not prev_iter_is_update:\r\n errors, loss, J_TWO, J_TCW = self.forward_jacobian(TWO_9d, TCW_9d, residuals_threshold)\r\n\r\n history['TWO_9d'].append(TWO_9d)\r\n history['TCW_9d'].append(TCW_9d)\r\n history['loss'].append(loss)\r\n history['lambda'].append(lambd)\r\n history['iteration'].append(n)\r\n\r\n if done:\r\n break\r\n\r\n # NOTE: This should not be necessary ?\r\n with torch.no_grad():\r\n J = torch.cat((J_TWO.flatten(-2, -1), J_TCW.flatten(-2, -1)), dim=-1)\r\n h = self.compute_lm_step(errors, J, lambd)\r\n h_TWO_9d = h[:n_params_TWO].view(self.n_objects, 9)\r\n h_TCW_9d = h[n_params_TWO:].view(self.n_views, 9)\r\n TWO_9d_updated = TWO_9d + h_TWO_9d\r\n if optimize_cameras:\r\n TCW_9d_updated = TCW_9d + h_TCW_9d\r\n else:\r\n TCW_9d_updated = TCW_9d\r\n\r\n errors, next_loss, J_TWO, J_TCW = self.forward_jacobian(TWO_9d_updated, TCW_9d_updated, residuals_threshold)\r\n\r\n rho = loss - next_loss\r\n if rho.abs() < eps:\r\n done = True\r\n elif rho > eps:\r\n TWO_9d = TWO_9d_updated\r\n TCW_9d = TCW_9d_updated\r\n loss = next_loss\r\n lambd = max(lambd / L_down, 1e-7)\r\n prev_iter_is_update = True\r\n else:\r\n lambd = min(lambd * L_up, 1e7)\r\n prev_iter_is_update = False\r\n return TWO_9d, TCW_9d, history\r\n\r\n def robust_initialization_TWO_TCW(self, n_init=1):\r\n TWO_9d_init = []\r\n TCW_9d_init = []\r\n dists = []\r\n for n in range(n_init):\r\n TWO, TWC = self.sample_initial_TWO_TWC(n)\r\n TCW = invert_T(TWC)\r\n TWO_9d, TCW_9d = self.extract_pose9d(TWO), self.extract_pose9d(TCW)\r\n dists_, _ = self.align_TCO_cand(TWO_9d, TCW_9d)\r\n TWO_9d_init.append(TWO_9d)\r\n TCW_9d_init.append(TCW_9d)\r\n dists.append(dists_.mean())\r\n best_iter = torch.tensor(dists).argmin()\r\n return TWO_9d_init[best_iter], TCW_9d_init[best_iter]\r\n\r\n def make_scene_infos(self, TWO_9d, TCW_9d):\r\n TWO = compute_transform_from_pose9d(TWO_9d)\r\n TCW = compute_transform_from_pose9d(TCW_9d)\r\n TWC = invert_T(TCW)\r\n 
objects = tc.PandasTensorCollection(\r\n infos=self.obj_infos,\r\n TWO=TWO,\r\n )\r\n cameras = tc.PandasTensorCollection(\r\n infos=self.cam_infos,\r\n TWC=TWC,\r\n K=self.K\r\n )\r\n return objects, cameras\r\n\r\n def convert_history(self, history):\r\n history['objects'] = []\r\n history['cameras'] = []\r\n for n in range(len(history['iteration'])):\r\n TWO_9d = history['TWO_9d'][n]\r\n TCW_9d = history['TCW_9d'][n]\r\n objects, cameras = self.make_scene_infos(TWO_9d, TCW_9d)\r\n history['objects'].append(objects)\r\n history['cameras'].append(cameras)\r\n return history\r\n\r\n def solve(self, sample_n_init=1, **lm_kwargs):\r\n timer_init = Timer()\r\n timer_opt = Timer()\r\n timer_misc = Timer()\r\n\r\n timer_init.start()\r\n TWO_9d_init, TCW_9d_init = self.robust_initialization_TWO_TCW(n_init=sample_n_init)\r\n timer_init.pause()\r\n\r\n timer_opt.start()\r\n TWO_9d_opt, TCW_9d_opt, history = self.optimize_lm(\r\n TWO_9d_init, TCW_9d_init, **lm_kwargs)\r\n timer_opt.pause()\r\n\r\n timer_misc.start()\r\n objects, cameras = self.make_scene_infos(TWO_9d_opt, TCW_9d_opt)\r\n objects_init, cameras_init = self.make_scene_infos(TWO_9d_init, TCW_9d_init)\r\n history = self.convert_history(history)\r\n timer_misc.pause()\r\n\r\n outputs = dict(\r\n objects_init=objects_init,\r\n cameras_init=cameras_init,\r\n objects=objects,\r\n cameras=cameras,\r\n history=history,\r\n time_init=timer_init.stop(),\r\n time_opt=timer_opt.stop(),\r\n time_misc=timer_misc.stop(),\r\n )\r\n return outputs\r\n"
] | [
[
"numpy.minimum",
"torch.empty",
"torch.cat",
"numpy.unique",
"torch.utils.data.TensorDataset",
"torch.utils.data.DataLoader",
"sklearn.metrics.average_precision_score"
],
[
"numpy.isin",
"torch.zeros",
"numpy.unique",
"numpy.arange",
"torch.eye",
"scipy.sparse.csr_matrix",
"torch.tensor",
"torch.no_grad",
"torch.arange",
"torch.is_grad_enabled",
"numpy.random.RandomState",
"numpy.where",
"torch.ones_like",
"scipy.sparse.csgraph.connected_components"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
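MultiviewRefinement.optimize_lm above is a Levenberg-Marquardt loop: it solves the damped normal equations and divides or multiplies the damping factor by L_down/L_up depending on whether the step reduced the loss. A self-contained numpy sketch of that rule on a toy curve fit (the residual model, names and constants are illustrative, not the repository's API):

import numpy as np

# Fit y = exp(a*x) + b with a Levenberg-Marquardt loop mirroring optimize_lm's
# damping schedule (accept step -> lambda /= L_down, reject -> lambda *= L_up).
def lm_fit(x, y, theta, n_iterations=50, lambd=1e-3, L_down=9, L_up=11, eps=1e-9):
    def residuals(t):
        return y - (np.exp(t[0] * x) + t[1])

    def jacobian(t):
        # d(residual)/d(theta): one column per parameter (a, b).
        return -np.stack([x * np.exp(t[0] * x), np.ones_like(x)], axis=1)

    loss = (residuals(theta) ** 2).mean()
    for _ in range(n_iterations):
        r, J = residuals(theta), jacobian(theta)
        # Damped normal equations: (J^T J + lambda * I) h = -J^T r
        h = np.linalg.solve(J.T @ J + lambd * np.eye(len(theta)), -J.T @ r)
        next_loss = (residuals(theta + h) ** 2).mean()
        if loss - next_loss > eps:   # step accepted: relax damping
            theta, loss = theta + h, next_loss
            lambd = max(lambd / L_down, 1e-7)
        else:                        # step rejected: increase damping
            lambd = min(lambd * L_up, 1e7)
    return theta

x = np.linspace(0.0, 1.0, 50)
y = np.exp(0.7 * x) + 0.3
print(lm_fit(x, y, theta=np.array([0.0, 0.0])))   # approaches (0.7, 0.3)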
Chandler/color_science_papers | [
"c43addd63d5dc9bc7ed5f093f60432c929d13b8a"
] | [
"a_simple_algorithm_for_metamer_mismatch_bodies/metamer_mismatch_body.py"
] | [
"import numpy as np\nfrom scipy import optimize\nfrom numpy.testing import assert_array_almost_equal as almost_equal\n\nCOLOR_DIMENSIONS = 3\nLIGHT_DIMENSIONS = 31\n\n# vector representing a light beam with power 1 at every wavelength\nequal_energy_illumination_vector = [1] * LIGHT_DIMENSIONS\n\ndef assert_shape(m, shape):\n if m.shape != shape:\n raise ValueError(\"incorrect shape expected: {} found: {}\".format(m.shape, shape))\n\ndef sample_unit_sphere(npoints):\n \"\"\"\n return `npoints` random points on the unit sphere\n \"\"\"\n vec = np.random.randn(3, npoints)\n vec /= np.linalg.norm(vec, axis=0)\n return vec.T\n\ndef solve_linear_program(\n object_function_coefficents,\n constraint_function=None,\n constraint_function_required_value=None,\n bounds=None):\n \"\"\"\n This method minimizes and maximizes a linear function with respect to\n an equality constraint and lower and upper bounds\n\n https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.linprog.html\n Minimize: c^T * x\n Subject to: \n A_ub * x <= b_ub\n A_eq * x == b_eq\n \"\"\"\n xmax = \\\n optimize.linprog(\n c=object_function_coefficents,\n A_eq=constraint_function,\n b_eq=constraint_function_required_value,\n bounds=bounds).x\n xmin = \\\n optimize.linprog(\n c=object_function_coefficents * -1,\n A_eq=constraint_function,\n b_eq=constraint_function_required_value,\n bounds=bounds).x\n\n return (xmin, xmax)\n\ndef compute_metamer_mismatch_body(\n observer_color_signal_Φ,\n observer_response_functions_Φ,\n observer_response_functions_Ψ,\n scene_illumination_Φ=equal_energy_illumination_vector,\n scene_illumination_Ψ=equal_energy_illumination_vector,\n sampling_resolution=100):\n\n assert_shape(observer_color_signal_Φ, (COLOR_DIMENSIONS,))\n assert_shape(observer_response_functions_Φ, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))\n assert_shape(observer_response_functions_Ψ, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))\n assert_shape(scene_illumination_Φ, (LIGHT_DIMENSIONS,))\n assert_shape(scene_illumination_Ψ, (LIGHT_DIMENSIONS,))\n\n color_signal_map_Φ = (observer_response_functions_Φ.T * scene_illumination_Φ).T\n color_signal_map_Ψ = (observer_response_functions_Ψ.T * scene_illumination_Ψ).T\n\n mismatch_body_extrema_points = []\n\n # iterate over a sampling of points on the unit sphere, interpreted as direction vectors\n # pointing in all directions.\n for direction_vector in sample_unit_sphere(sampling_resolution):\n\n # We assume the Euclidan Inner Product for our color vector space. Given that, a vector and its\n # functional (its covector) are identical. (This is why the euclidian dot product is a vector\n # matrix multiplied against itself)\n #\n # This functional can be thought of stacks of parallel\n # planes that are normal to `direction_vector`. Two of these planes will lie tangent to our metamer\n # mismatch body.\n direction_functional = direction_vector\n\n # compose the direction functional R3 -> R, with the color signal map to produce\n # a new funtional R31 -> R. 
\n ΨF = np.dot(color_signal_map_Ψ, direction_functional)\n\n # construct a linear programming problem\n # equation to minimize and maximize: \n # ΨF, a function from R31 -> R\n # ΨF returns the projection of some reflectance function in R31\n # onto the line in R3 represented by `direction_vector`\n #\n # constraints:\n # 1) constrain the solution set of reflectances to `0 > x_i <= 1`, this limits the solution to\n # physically realizable reflectances\n #\n # 2) constrain the solution set to `color_signal_map_Φ(x) = observer_color_signal_Φ`, \n # this limits the solution to metamers of `observer_color_signal_Φ`\n #\n # These are both convex sets. Their intersection is also a convex set, which is the metamer\n # Mismatch Body we are computing.\n #\n min_reflectance, max_reflectance = \\\n solve_linear_program(\n object_function_coefficents=ΨF,\n constraint_function=color_signal_map_Φ.T,\n constraint_function_required_value=observer_color_signal_Φ,\n bounds=(0,1))\n\n # # inline-test: these two reflectences should be metamers of `observer_color_signal_Φ`\n # almost_equal(observer_color_signal_Φ, np.dot(color_signal_map_Φ.T, min_reflectance), decimal=2)\n # almost_equal(observer_color_signal_Φ, np.dot(color_signal_map_Φ.T, max_reflectance), decimal=2)\n \n min_color_signal_Ψ = np.dot(color_signal_map_Ψ.T, min_reflectance) \n max_color_signal_Ψ = np.dot(color_signal_map_Ψ.T, max_reflectance)\n\n mismatch_body_extrema_points.extend([min_color_signal_Ψ, max_color_signal_Ψ])\n\n # scale the resulting body so that the brightest illuminant color response == 1\n scale_factor = np.max(np.dot(observer_response_functions_Ψ.T, scene_illumination_Ψ))\n\n return [p/scale_factor for p in mismatch_body_extrema_points]\n\ndef compute_object_color_solid(\n observer_response_functions,\n scene_illumination=equal_energy_illumination_vector,\n sampling_resolution=100):\n \"\"\"\n The linear programming formulation of the OCS is identical to that of the MMB minus\n the constraints related to the second observer.\n\n An MMB is a product of two observers but the OCS is simply the set of all object colors\n for a single observer.\n\n \"Computing the object colour solid using spherical sampling\"\n https://ueaeprints.uea.ac.uk/62975/\n \"\"\"\n assert_shape(observer_response_functions, (LIGHT_DIMENSIONS, COLOR_DIMENSIONS))\n assert_shape(scene_illumination, (LIGHT_DIMENSIONS,))\n\n color_signal_map = (observer_response_functions.T * scene_illumination).T\n\n ocs_extrema_points = []\n\n # iterate over a sampling of points on the unit sphere, interpreted as direction vectors\n # pointing in all directions.\n for direction_vector in sample_unit_sphere(sampling_resolution):\n\n direction_functional = direction_vector\n\n # compose the direction functional R3 -> R, with the color signal map to produce\n # a new funtional R31 -> R. \n ΨF = np.dot(color_signal_map, direction_functional)\n\n min_reflectance, max_reflectance = \\\n solve_linear_program(\n object_function_coefficents=ΨF,\n bounds=(0,1))\n\n min_color_signal = np.dot(color_signal_map.T, min_reflectance) \n max_color_signal = np.dot(color_signal_map.T, max_reflectance)\n\n ocs_extrema_points.extend([min_color_signal, max_color_signal])\n\n # scale the resulting body so that the brightest illuminant color response == 1\n scale_factor = np.max(np.dot(observer_response_functions.T, scene_illumination))\n\n return [p/scale_factor for p in ocs_extrema_points]\n"
] | [
[
"numpy.dot",
"numpy.random.randn",
"numpy.linalg.norm",
"scipy.optimize.linprog"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
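Each sampled sphere direction above turns into a pair of linear programs: minimize and maximize the projection of a reflectance onto that direction, subject to box bounds and the metamer equality constraint. A minimal scipy.optimize.linprog sketch with random placeholder response functions (one observer stands in for both Φ and Ψ; real observer data would replace it):

import numpy as np
from scipy import optimize

rng = np.random.default_rng(0)
responses = rng.random((31, 3))      # 31 wavelengths x 3 channels (placeholder)
x0 = rng.random(31)                  # some physically realizable reflectance
color_signal = responses.T @ x0      # its color signal: the equality target

direction = rng.standard_normal(3)
direction /= np.linalg.norm(direction)
c = responses @ direction            # projection functional on R^31

# Minimizing c finds the lowest projection; minimizing -c finds the highest.
xmin = optimize.linprog(c=c,  A_eq=responses.T, b_eq=color_signal, bounds=(0, 1)).x
xmax = optimize.linprog(c=-c, A_eq=responses.T, b_eq=color_signal, bounds=(0, 1)).x

# Both extremes remain metamers of x0 under the constraining observer.
print(np.allclose(responses.T @ xmin, color_signal, atol=1e-6))
print(np.allclose(responses.T @ xmax, color_signal, atol=1e-6))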
schoppmp/iree | [
"d573c3dbb4eef8044764ae6d80ca79e37e8de522",
"d573c3dbb4eef8044764ae6d80ca79e37e8de522",
"d573c3dbb4eef8044764ae6d80ca79e37e8de522"
] | [
"bindings/python/tests/testdata/generate_tflite.py",
"integrations/tensorflow/e2e/einsum_dynamic_test.py",
"integrations/tensorflow/e2e/range_test.py"
] | [
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\n\nimport tensorflow as tf\n\n\nclass Squared(tf.Module):\n\n @tf.function\n def __call__(self, x):\n return tf.square(x)\n\n\nmodel = Squared()\nconcrete_func = model.__call__.get_concrete_function(\n tf.TensorSpec(shape=[4, 3], dtype=tf.float32))\n\nconverter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func])\ntflite_model = converter.convert()\n\nthis_dir = os.path.dirname(__file__)\nwith open(os.path.join(this_dir, \"tflite_sample.fb\"), \"wb\") as f:\n f.write(tflite_model)\n",
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test matrix ops via einsum\"\"\"\n\nfrom iree.tf.support import tf_test_utils\nfrom iree.tf.support import tf_utils\nimport tensorflow.compat.v2 as tf\n\nLEFT_DIM = 6\nINNER_DIM = 3\nRIGHT_DIM = 6\nBATCH_DIM = 8\n\n\nclass EinsumDynamicModule(tf.Module):\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, None], tf.float32),\n ])\n def einsum_dynamic_dim_identity(self, x):\n return tf.einsum('ij', x)\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, None, None], tf.float32),\n ])\n def einsum_dynamic_rank_identity(self, x):\n return tf.einsum('...', x)\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, LEFT_DIM, RIGHT_DIM], tf.float32),\n ])\n def einsum_dynamic_dim_transpose(self, x):\n return tf.einsum('bij -> bji', x)\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, None, LEFT_DIM, RIGHT_DIM], tf.float32),\n ])\n def einsum_dynamic_rank_diag(self, x):\n return tf.einsum('...ii -> ...i', x)\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, None, LEFT_DIM, RIGHT_DIM], tf.float32),\n ])\n def einsum_dynamic_dim_sum(self, x):\n return tf.einsum('abij -> ab', x)\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, None], tf.float32),\n tf.TensorSpec([None, None], tf.float32),\n ])\n def einsum_dynamic_dim_matmul(self, lhs, rhs):\n return tf.einsum('ij, jk -> ik', lhs, rhs)\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, LEFT_DIM, INNER_DIM], tf.float32),\n tf.TensorSpec([INNER_DIM, RIGHT_DIM], tf.float32),\n ])\n def einsum_dynamic_dim_lhs_batch(self, lhs, rhs):\n return tf.einsum('bij, jk -> bik', lhs, rhs)\n\n @tf.function(input_signature=[\n tf.TensorSpec([None, None, 8, 6], tf.float32),\n tf.TensorSpec([12, 6, 4], tf.float32),\n ])\n def einsum_dynamic_rank_split_heads(self, seq, weights):\n # l: seq_len, m: d_model, h: num_heads, d: attention_depth\n return tf.einsum('...lm, hmd -> ...hld', seq, weights)\n\n\nclass EinsumDynamicTest(tf_test_utils.TracedModuleTestCase):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._modules = tf_test_utils.compile_tf_module(EinsumDynamicModule)\n\n # yapf: disable\n def test_einsum_dynamic_dim_identity(self):\n def einsum_dynamic_dim_identity(module):\n module.einsum_dynamic_dim_identity(\n tf_utils.ndarange([LEFT_DIM, RIGHT_DIM]))\n self.compare_backends(einsum_dynamic_dim_identity, self._modules)\n\n def test_einsum_dynamic_rank_identity(self):\n def einsum_dynamic_rank_identity(module):\n module.einsum_dynamic_rank_identity(\n tf_utils.ndarange([BATCH_DIM, LEFT_DIM, RIGHT_DIM]))\n self.compare_backends(einsum_dynamic_rank_identity, self._modules)\n\n def test_einsum_dynamic_dim_transpose(self):\n def einsum_dynamic_dim_transpose(module):\n module.einsum_dynamic_dim_transpose(\n tf_utils.ndarange([BATCH_DIM, LEFT_DIM, RIGHT_DIM]))\n self.compare_backends(einsum_dynamic_dim_transpose, self._modules)\n\n def test_einsum_dynamic_rank_diag(self):\n def 
einsum_dynamic_rank_diag(module):\n module.einsum_dynamic_rank_diag(\n tf_utils.ndarange([BATCH_DIM, BATCH_DIM, LEFT_DIM, RIGHT_DIM]))\n self.compare_backends(einsum_dynamic_rank_diag, self._modules)\n\n def test_einsum_dynamic_dim_sum(self):\n def einsum_dynamic_dim_sum(module):\n module.einsum_dynamic_dim_sum(\n tf_utils.ndarange([BATCH_DIM, BATCH_DIM, LEFT_DIM, RIGHT_DIM]))\n self.compare_backends(einsum_dynamic_dim_sum, self._modules)\n\n def test_einsum_dynamic_dim_matmul(self):\n def einsum_dynamic_dim_matmul(module):\n module.einsum_dynamic_dim_matmul(\n tf_utils.ndarange([LEFT_DIM, INNER_DIM]),\n tf_utils.ndarange([INNER_DIM, RIGHT_DIM]))\n self.compare_backends(einsum_dynamic_dim_matmul, self._modules)\n\n def test_einsum_dynamic_dim_lhs_batch(self):\n def einsum_dynamic_dim_lhs_batch(module):\n module.einsum_dynamic_dim_lhs_batch(\n tf_utils.ndarange([BATCH_DIM, LEFT_DIM, INNER_DIM]),\n tf_utils.ndarange([INNER_DIM, RIGHT_DIM]))\n self.compare_backends(einsum_dynamic_dim_lhs_batch, self._modules)\n\n def test_einsum_dynamic_rank_split_heads(self):\n def einsum_dynamic_rank_split_heads(module):\n module.einsum_dynamic_rank_split_heads(\n tf_utils.ndarange([BATCH_DIM, BATCH_DIM, 8, 6]),\n tf_utils.ndarange([12, 6, 4]))\n self.compare_backends(einsum_dynamic_rank_split_heads, self._modules)\n # yapf: enable\n\n\nif __name__ == \"__main__\":\n if hasattr(tf, \"enable_v2_behavior\"):\n tf.enable_v2_behavior()\n tf.test.main()\n",
"# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom absl import app\nfrom iree.tf.support import tf_test_utils\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\nclass RangeModule(tf.Module):\n\n def __init__(self):\n pass\n\n @tf.function(input_signature=[\n tf.TensorSpec([], tf.float32),\n tf.TensorSpec([], tf.float32),\n tf.TensorSpec([], tf.float32)\n ])\n def range(self, start, stop, delta):\n return tf.range(start, stop, delta)\n\n\nclass RangeTest(tf_test_utils.TracedModuleTestCase):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._modules = tf_test_utils.compile_tf_module(RangeModule)\n\n def test_range(self):\n\n def range(module):\n start = np.array(3., dtype=np.float32)\n stop = np.array(12., dtype=np.float32)\n delta = np.array(3, dtype=np.float32)\n result = module.range(start, stop, delta)\n\n self.compare_backends(range, self._modules)\n\n\ndef main(argv):\n del argv # Unused\n if hasattr(tf, 'enable_v2_behavior'):\n tf.enable_v2_behavior()\n tf.test.main()\n\n\nif __name__ == '__main__':\n app.run(main)\n"
] | [
[
"tensorflow.square",
"tensorflow.TensorSpec",
"tensorflow.lite.TFLiteConverter.from_concrete_functions"
],
[
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.einsum"
],
[
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.enable_v2_behavior",
"tensorflow.compat.v2.range",
"numpy.array",
"tensorflow.compat.v2.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
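The dynamic-shape einsum tests above rely on a standard tf.function pattern: trace once with None dimensions in the input signature, then call with any concrete sizes that match. A minimal runnable sketch of that pattern (the matmul subscripts mirror einsum_dynamic_dim_matmul):

import tensorflow as tf

@tf.function(input_signature=[
    tf.TensorSpec([None, None], tf.float32),
    tf.TensorSpec([None, None], tf.float32),
])
def einsum_matmul(lhs, rhs):
    # One trace serves every (i, j) x (j, k) shape combination.
    return tf.einsum('ij,jk->ik', lhs, rhs)

print(einsum_matmul(tf.ones([6, 3]), tf.ones([3, 6])).shape)   # (6, 6)
print(einsum_matmul(tf.ones([2, 5]), tf.ones([5, 4])).shape)   # (2, 4)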
JoaoVicente129/GamestonkTerminal | [
"5f744f8ff3f034e436d0629fb530ffd41b86e6d9"
] | [
"gamestonk_terminal/behavioural_analysis/reddit_view.py"
] | [
"import argparse\nimport warnings\nimport pandas as pd\nfrom prawcore.exceptions import ResponseException\nfrom requests import HTTPError\nfrom psaw import PushshiftAPI\nimport praw\nimport finviz\nfrom gamestonk_terminal.helper_funcs import check_positive, parse_known_args_and_warn\nfrom gamestonk_terminal import config_terminal as cfg\nfrom gamestonk_terminal.reddit_helpers import (\n print_and_record_reddit_post,\n find_tickers,\n)\n\n\ndef watchlist(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"watchlist\",\n description=\"\"\"Print other users watchlist. [Source: Reddit]\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=5,\n help=\"limit of posts with watchlists retrieved.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n l_sub_reddits = [\n \"pennystocks\",\n \"RobinHoodPennyStocks\",\n \"Daytrading\",\n \"StockMarket\",\n \"stocks\",\n \"investing\",\n \"wallstreetbets\",\n ]\n\n d_submission = {}\n d_watchlist_tickers = {}\n l_watchlist_links = list()\n l_watchlist_author = list()\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n # dt_last_time_market_close = get_last_time_market_was_open(\n # datetime.now() - timedelta(hours=24)\n # )\n # n_ts_after = int(dt_last_time_market_close.timestamp())\n psaw_api = PushshiftAPI()\n submissions = psaw_api.search_submissions(\n # after=n_ts_after,\n subreddit=l_sub_reddits,\n q=\"WATCHLIST|Watchlist|watchlist\",\n filter=[\"id\"],\n )\n\n n_flair_posts_found = 0\n while True:\n try:\n submission = next(submissions, None)\n\n # Check if search_submissions didn't get anymore posts\n if not submission:\n break\n\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if (\n not submission.removed_by_category\n and submission.selftext\n and submission.link_flair_text not in [\"Yolo\", \"Meme\"]\n and submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Increment count of valid posts found\n n_flair_posts_found += 1\n\n # Check if number of wanted posts found has been reached\n if n_flair_posts_found > ns_parser.n_limit - 1:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. 
check your token.\\n\"\n )\n return\n\n if n_flair_posts_found:\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n s_watchlist_tickers = \"\"\n n_tickers = 0\n for t_ticker in lt_watchlist_sorted:\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n finviz.get_stock(t_ticker[0])\n if int(t_ticker[1]) > 1:\n s_watchlist_tickers += f\"{t_ticker[1]} {t_ticker[0]}, \"\n n_tickers += 1\n except Exception:\n # print(e, \"\\n\")\n pass\n if n_tickers:\n print(\n \"The following stock tickers have been mentioned more than once across the previous watchlists:\"\n )\n print(s_watchlist_tickers[:-2] + \"\\n\")\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n print(\"\")\n\n\ndef popular_tickers(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"popular\",\n description=\"\"\"Print latest popular tickers. [Source: Reddit] \"\"\",\n )\n parser.add_argument(\n \"-n\",\n \"--number\",\n action=\"store\",\n dest=\"n_top\",\n type=check_positive,\n default=10,\n help=\"display top N tickers\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=50,\n help=\"limit of posts retrieved per sub reddit.\",\n )\n parser.add_argument(\n \"-s\",\n \"--sub\",\n action=\"store\",\n dest=\"s_subreddit\",\n type=str,\n help=\"\"\"\n subreddits to look for tickers, e.g. pennystocks,stocks.\n Default: pennystocks, RobinHoodPennyStocks, Daytrading, StockMarket, stocks, investing,\n wallstreetbets\n \"\"\",\n )\n \"\"\"\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=1,\n help=\"look for the tickers from those n past days.\",\n )\n \"\"\"\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # n_ts_after = int(\n # (datetime.today() - timedelta(days=ns_parser.n_days)).timestamp()\n # )\n\n if ns_parser.s_subreddit:\n if \",\" in ns_parser.s_subreddit:\n l_sub_reddits = ns_parser.s_subreddit.split(\",\")\n else:\n l_sub_reddits = [ns_parser.s_subreddit]\n else:\n l_sub_reddits = [\n \"pennystocks\",\n \"RobinHoodPennyStocks\",\n \"Daytrading\",\n \"StockMarket\",\n \"stocks\",\n \"investing\",\n \"wallstreetbets\",\n ]\n\n # d_submission = {}\n d_watchlist_tickers = {}\n # l_watchlist_links = list()\n l_watchlist_author = list()\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n psaw_api = PushshiftAPI()\n\n for s_sub_reddit in l_sub_reddits:\n print(\n f\"Search for latest tickers under {ns_parser.n_limit} '{s_sub_reddit}' posts\"\n )\n submissions = psaw_api.search_submissions(\n # after=int(n_ts_after),\n subreddit=s_sub_reddit,\n limit=ns_parser.n_limit,\n filter=[\"id\"],\n )\n\n n_tickers = 0\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's content\n if (\n not submission.removed_by_category\n and (submission.selftext or submission.title)\n and 
submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n n_tickers += len(l_tickers_found)\n\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. check your token.\\n\"\n )\n return\n\n print(f\" {n_tickers} potential tickers found.\")\n\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n\n if lt_watchlist_sorted:\n n_top_stocks = 0\n # pylint: disable=redefined-outer-name\n popular_tickers = []\n for t_ticker in lt_watchlist_sorted:\n if n_top_stocks > ns_parser.n_top:\n break\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n stock_info = finviz.get_stock(t_ticker[0])\n popular_tickers.append(\n (\n t_ticker[1],\n t_ticker[0],\n stock_info[\"Company\"],\n stock_info[\"Sector\"],\n stock_info[\"Price\"],\n stock_info[\"Change\"],\n stock_info[\"Perf Month\"],\n f\"https://finviz.com/quote.ashx?t={t_ticker[0]}\",\n )\n )\n n_top_stocks += 1\n except HTTPError as e:\n if e.response.status_code != 404:\n print(f\"Unexpected exception from Finviz: {e}\")\n except Exception as e:\n print(e, \"\\n\")\n return\n\n popular_tickers_df = pd.DataFrame(\n popular_tickers,\n columns=[\n \"Mentions\",\n \"Ticker\",\n \"Company\",\n \"Sector\",\n \"Price\",\n \"Change\",\n \"Perf Month\",\n \"URL\",\n ],\n )\n\n print(f\"\\nThe following TOP {ns_parser.n_top} tickers have been mentioned:\")\n\n print(popular_tickers_df, \"\\n\")\n else:\n print(\"No tickers found\")\n\n print(\"\")\n\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. 
check your token.\\n\"\n )\n return\n\n\ndef spac_community(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"spac_c\",\n description=\"\"\"Print other users SPACs announcement under subreddit 'SPACs' [Source: Reddit]\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=10,\n help=\"limit of posts with SPACs retrieved\",\n )\n parser.add_argument(\n \"-p\",\n \"--popular\",\n action=\"store_true\",\n default=False,\n dest=\"b_popular\",\n help=\"popular flag, if true the posts retrieved are based on score rather than time\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n d_submission = {}\n d_watchlist_tickers = {}\n l_watchlist_links = list()\n l_watchlist_author = list()\n\n # psaw_api = PushshiftAPI()\n\n if ns_parser.b_popular:\n submissions = praw_api.subreddit(\"SPACs\").hot(limit=ns_parser.n_limit)\n else:\n submissions = praw_api.subreddit(\"SPACs\").new(limit=ns_parser.n_limit)\n\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if (\n not submission.removed_by_category\n and submission.selftext\n and submission.link_flair_text not in [\"Yolo\", \"Meme\"]\n and submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. 
check your token.\\n\"\n )\n return\n\n if d_watchlist_tickers:\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n s_watchlist_tickers = \"\"\n n_tickers = 0\n for t_ticker in lt_watchlist_sorted:\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n finviz.get_stock(t_ticker[0])\n if int(t_ticker[1]) > 1:\n s_watchlist_tickers += f\"{t_ticker[1]} {t_ticker[0]}, \"\n n_tickers += 1\n except Exception:\n # print(e, \"\\n\")\n pass\n\n if n_tickers:\n print(\n \"The following stock tickers have been mentioned more than once across the previous SPACs:\"\n )\n print(s_watchlist_tickers[:-2])\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n\n\ndef spac(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"spac\",\n description=\"\"\" Show other users SPACs announcement [Reddit] \"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=5,\n help=\"limit of posts with SPACs retrieved.\",\n )\n \"\"\"\n parser.add_argument(\n \"-d\",\n \"--days\",\n action=\"store\",\n dest=\"n_days\",\n type=check_positive,\n default=5,\n help=\"look for the tickers from those n past days.\",\n )\n \"\"\"\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n d_submission = {}\n d_watchlist_tickers = {}\n l_watchlist_links = list()\n l_watchlist_author = list()\n\n # n_ts_after = int(\n # (datetime.today() - timedelta(days=ns_parser.n_days)).timestamp()\n # )\n l_sub_reddits = [\n \"pennystocks\",\n \"RobinHoodPennyStocks\",\n \"Daytrading\",\n \"StockMarket\",\n \"stocks\",\n \"investing\",\n \"wallstreetbets\",\n ]\n\n warnings.filterwarnings(\"ignore\") # To avoid printing the warning\n psaw_api = PushshiftAPI()\n submissions = psaw_api.search_submissions(\n # after=n_ts_after,\n subreddit=l_sub_reddits,\n q=\"SPAC|Spac|spac|Spacs|spacs\",\n filter=[\"id\"],\n )\n n_flair_posts_found = 0\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if (\n not submission.removed_by_category\n and submission.selftext\n and submission.link_flair_text not in [\"Yolo\", \"Meme\"]\n and submission.author.name not in l_watchlist_author\n ):\n l_tickers_found = find_tickers(submission)\n\n if l_tickers_found:\n # Add another author's name to the parsed watchlists\n l_watchlist_author.append(submission.author.name)\n\n # Lookup stock tickers within a watchlist\n for key in l_tickers_found:\n if key in d_watchlist_tickers:\n # Increment stock ticker found\n d_watchlist_tickers[key] += 1\n else:\n # Initialize stock ticker found\n d_watchlist_tickers[key] = 1\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Increment count of valid posts found\n n_flair_posts_found += 1\n\n # Check if 
number of wanted posts found has been reached\n if n_flair_posts_found > ns_parser.n_limit - 1:\n break\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. check your token.\\n\"\n )\n return\n\n if n_flair_posts_found:\n lt_watchlist_sorted = sorted(\n d_watchlist_tickers.items(), key=lambda item: item[1], reverse=True\n )\n s_watchlist_tickers = \"\"\n n_tickers = 0\n for t_ticker in lt_watchlist_sorted:\n try:\n # If try doesn't trigger exception, it means that this stock exists on finviz\n # thus we can print it.\n finviz.get_stock(t_ticker[0])\n if int(t_ticker[1]) > 1:\n s_watchlist_tickers += f\"{t_ticker[1]} {t_ticker[0]}, \"\n n_tickers += 1\n except Exception:\n # print(e, \"\\n\")\n pass\n if n_tickers:\n print(\n \"The following stock tickers have been mentioned more than once across the previous SPACs:\"\n )\n print(s_watchlist_tickers[:-2])\n print(\"\")\n\n except Exception as e:\n print(e, \"\\n\")\n\n\ndef wsb_community(l_args):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"wsb\",\n description=\"\"\"Print what WSB gang are up to in subreddit wallstreetbets. [Source: Reddit]\"\"\",\n )\n parser.add_argument(\n \"-l\",\n \"--limit\",\n action=\"store\",\n dest=\"n_limit\",\n type=check_positive,\n default=10,\n help=\"limit of posts to print.\",\n )\n parser.add_argument(\n \"-n\",\n \"--new\",\n action=\"store_true\",\n default=False,\n dest=\"b_new\",\n help=\"new flag, if true the posts retrieved are based on being more recent rather than their score.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n praw_api = praw.Reddit(\n client_id=cfg.API_REDDIT_CLIENT_ID,\n client_secret=cfg.API_REDDIT_CLIENT_SECRET,\n username=cfg.API_REDDIT_USERNAME,\n user_agent=cfg.API_REDDIT_USER_AGENT,\n password=cfg.API_REDDIT_PASSWORD,\n )\n\n d_submission = {}\n l_watchlist_links = list()\n\n # psaw_api = PushshiftAPI()\n\n if ns_parser.b_new:\n submissions = praw_api.subreddit(\"wallstreetbets\").new(\n limit=ns_parser.n_limit\n )\n else:\n submissions = praw_api.subreddit(\"wallstreetbets\").hot(\n limit=ns_parser.n_limit\n )\n while True:\n try:\n submission = next(submissions, None)\n if submission:\n # Get more information about post using PRAW api\n submission = praw_api.submission(id=submission.id)\n\n # Ensure that the post hasn't been removed by moderator in the meanwhile,\n # that there is a description and it's not just an image, that the flair is\n # meaningful, and that we aren't re-considering same author's watchlist\n if not submission.removed_by_category:\n\n l_watchlist_links.append(\n f\"https://old.reddit.com{submission.permalink}\"\n )\n\n print_and_record_reddit_post(d_submission, submission)\n\n # Check if search_submissions didn't get anymore posts\n else:\n break\n except ResponseException:\n print(\n \"Received a response from Reddit with an authorization error. check your token.\\n\"\n )\n return\n except Exception as e:\n print(e, \"\\n\")\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
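Note on the row above: both `spac_community` and `spac` dereference `submission.author.name` unconditionally, but PRAW reports the author of a deleted account as `None`, so that attribute access can raise `AttributeError` and abort the whole scan via the outer `except Exception` handler. A minimal guard (a hypothetical helper, not part of the original module) could look like:

```python
# Hypothetical guard for PRAW submissions: `submission.author` is None
# when the account has been deleted, so `.name` cannot be read blindly.
def author_name(submission):
    return submission.author.name if submission.author is not None else "[deleted]"
```

With such a helper, the `submission.author.name not in l_watchlist_author` checks would degrade gracefully for deleted accounts instead of terminating the loop.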
wann31828/Tacotron-2 | [
"d7161f950cd42c7a7fd36ec2aaac19ba60567876"
] | [
"tacotron/utils/plot.py"
] | [
"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nimport numpy as np\n\n\ndef split_title_line(title_text, max_words=5):\n\t\"\"\"\n\tA function that splits any string based on specific character\n\t(returning it with the string), with maximum number of words on it\n\t\"\"\"\n\tseq = title_text.split()\n\treturn '\\n'.join([' '.join(seq[i:i + max_words]) for i in range(0, len(seq), max_words)])\n\ndef plot_alignment(alignment, path, info=None, split_title=False, max_len=None):\n\tif max_len is not None:\n\t\talignment = alignment[:, :max_len]\n\n\tfig = plt.figure(figsize=(8, 6))\n\tax = fig.add_subplot(111)\n\n\tim = ax.imshow(\n\t\talignment,\n\t\taspect='auto',\n\t\torigin='lower',\n\t\tinterpolation='none')\n\tfig.colorbar(im, ax=ax)\n\txlabel = 'Decoder timestep'\n\tif info is not None:\n\t\tif split_title:\n\t\t\ttitle = split_title_line(info)\n\t\telse:\n\t\t\ttitle = info\n\tplt.xlabel(xlabel)\n\tplt.title(title)\n\tplt.ylabel('Encoder timestep')\n\tplt.tight_layout()\n\tplt.savefig(path, format='png')\n\tplt.close()\n\n\ndef plot_spectrogram(pred_spectrogram, path, info=None, split_title=False, target_spectrogram=None, max_len=None, auto_aspect=False):\n\tif max_len is not None:\n\t\ttarget_spectrogram = target_spectrogram[:max_len]\n\t\tpred_spectrogram = pred_spectrogram[:max_len]\n\n\tif info is not None:\n\t\tif split_title:\n\t\t\ttitle = split_title_line(info)\n\t\telse:\n\t\t\ttitle = info\n\n\tfig = plt.figure(figsize=(10, 8))\n\t# Set common labels\n\tfig.text(0.5, 0.18, title, horizontalalignment='center', fontsize=16)\n\n\t#target spectrogram subplot\n\tif target_spectrogram is not None:\n\t\tax1 = fig.add_subplot(311)\n\t\tax2 = fig.add_subplot(312)\n\n\t\tif auto_aspect:\n\t\t\tim = ax1.imshow(np.rot90(target_spectrogram), aspect='auto', interpolation='none')\n\t\telse:\n\t\t\tim = ax1.imshow(np.rot90(target_spectrogram), interpolation='none')\n\t\tax1.set_title('Target Mel-Spectrogram')\n\t\tfig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax1)\n\t\tax2.set_title('Predicted Mel-Spectrogram')\n\telse:\n\t\tax2 = fig.add_subplot(211)\n\n\tif auto_aspect:\n\t\tim = ax2.imshow(np.rot90(pred_spectrogram), aspect='auto', interpolation='none')\n\telse:\n\t\tim = ax2.imshow(np.rot90(pred_spectrogram), interpolation='none')\n\tfig.colorbar(mappable=im, shrink=0.65, orientation='horizontal', ax=ax2)\n\n\tplt.tight_layout()\n\tplt.savefig(path, format='png')\n\tplt.close()\n"
] | [
[
"numpy.rot90",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.title",
"matplotlib.use",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
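Note on the row above: in `plot_alignment`, `title` is bound only inside the `if info is not None:` branch, yet `plt.title(title)` runs unconditionally, so calling the function without `info` raises `UnboundLocalError` (the same pattern appears in `plot_spectrogram`, which passes `title` to `fig.text`). A guarded variant might look like the sketch below; the empty default title is an assumption, not taken from the repository, and `split_title_line` is the helper defined in the same file.

```python
import matplotlib
matplotlib.use('Agg')  # same headless backend the module selects
import matplotlib.pyplot as plt

def plot_alignment(alignment, path, info=None, split_title=False, max_len=None):
    if max_len is not None:
        alignment = alignment[:, :max_len]
    fig = plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111)
    im = ax.imshow(alignment, aspect='auto', origin='lower', interpolation='none')
    fig.colorbar(im, ax=ax)
    # Bind `title` on every path so plt.title() never sees an unbound name.
    title = split_title_line(info) if (info is not None and split_title) else (info or '')
    plt.xlabel('Decoder timestep')
    plt.title(title)
    plt.ylabel('Encoder timestep')
    plt.tight_layout()
    plt.savefig(path, format='png')
    plt.close()
```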
suryakantpandey/opencv | [
"2a52e44bc605bf73502dee0e0e3954bcda19a246"
] | [
"modules/calib3d/misc/python/test/test_solvepnp.py"
] | [
"#!/usr/bin/env python\n# Python 2/3 compatibility\nfrom __future__ import print_function\n\nimport numpy as np\nimport cv2 as cv\n\nfrom tests_common import NewOpenCVTests\n\nclass solvepnp_test(NewOpenCVTests):\n\n def test_regression_16040(self):\n obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n img_points = np.array(\n [[700, 400], [700, 600], [900, 600], [900, 400]], dtype=np.float32\n )\n\n cameraMatrix = np.array(\n [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32\n )\n distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)\n r = np.array([], dtype=np.float32)\n x, r, t, e = cv.solvePnPGeneric(\n obj_points, img_points, cameraMatrix, distCoeffs, reprojectionError=r\n )\n\n def test_regression_16040_2(self):\n obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n img_points = np.array(\n [[[700, 400], [700, 600], [900, 600], [900, 400]]], dtype=np.float32\n )\n\n cameraMatrix = np.array(\n [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32\n )\n distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)\n r = np.array([], dtype=np.float32)\n x, r, t, e = cv.solvePnPGeneric(\n obj_points, img_points, cameraMatrix, distCoeffs, reprojectionError=r\n )\n\n def test_regression_16049(self):\n obj_points = np.array([[0, 0, 0], [0, 1, 0], [1, 1, 0], [1, 0, 0]], dtype=np.float32)\n img_points = np.array(\n [[[700, 400], [700, 600], [900, 600], [900, 400]]], dtype=np.float32\n )\n\n cameraMatrix = np.array(\n [[712.0634, 0, 800], [0, 712.540, 500], [0, 0, 1]], dtype=np.float32\n )\n distCoeffs = np.array([[0, 0, 0, 0]], dtype=np.float32)\n x, r, t, e = cv.solvePnPGeneric(\n obj_points, img_points, cameraMatrix, distCoeffs\n )\n if e is None:\n # noArray() is supported, see https://github.com/opencv/opencv/issues/16049\n pass\n else:\n eDump = cv.utils.dumpInputArray(e)\n self.assertEqual(eDump, \"InputArray: empty()=false kind=0x00010000 flags=0x01010000 total(-1)=1 dims(-1)=2 size(-1)=1x1 type(-1)=CV_32FC1\")\n\n\nif __name__ == '__main__':\n NewOpenCVTests.bootstrap()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jrkoenig/folseparators | [
"7b35a1e5fc27709adcc647528bf8820201408966"
] | [
"experiments/make_charts.py"
] | [
"# Copyright 2020 Stanford University\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse, random, json, os\nimport numpy as np\nimport pandas as pd\nimport matplotlib\nmatplotlib.use(\"Agg\")\n\nimport matplotlib.pyplot as plt\n\n\nimport seaborn as sns\nfrom collections import Counter\nfrom typing import *\nimport typing\n\ndef int_bins(x: List[int]) -> List[float]:\n l,h = min(x),max(x)\n return [x - 0.5 for x in range(l,h+2)]\ndef intdistplot(x: Any, **kwargs: Any) -> Any:\n return sns.distplot(x, bins = int_bins(x), **kwargs)\n\n\ndef main() -> None:\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"results\", type=argparse.FileType('r'))\n parser.add_argument(\"--description\", type=argparse.FileType('r'), default = \"conjectures/benchmark.json\")\n parser.add_argument(\"--output\", \"-o\", type=str, default = \"out/charts\")\n args = parser.parse_args()\n\n\n try:\n os.makedirs(args.output, exist_ok=True)\n os.chdir(args.output)\n except OSError as e:\n print(e)\n return\n\n sns.set(style=\"white\", palette=\"muted\", color_codes=True)\n font = {'size':16, 'family':'serif', 'serif': ['CMU Serif']}\n plt.rc('font', **font)\n plt.rc('mathtext', fontset='cm')\n plt.rc('axes', labelsize='medium')\n plt.rc('xtick', labelsize='medium')\n plt.rc('ytick', labelsize='medium')\n plt.rc('legend', fontsize='medium')\n\n descs = json.load(args.description)\n desc_by_id = {}\n for d in descs:\n desc_by_id[(d['base'], d['conjecture'])] = d\n def desc_of(r: Dict) -> Dict:\n return desc_by_id[(r['base'], r['conjecture'])]\n results = json.load(args.results)\n results = [r for r in results if r['base'] != 'tlb-safety']\n\n summary_file = open(\"summary.txt\", \"w\")\n def _print(*args: Any) -> None:\n print(*args, file=summary_file)\n\n # print(\"Random Formula:\")\n # sample = random.sample(results, 5)\n # for r in sample:\n # print(desc_by_id[(r['base'], r['conjecture'])]['golden_formula'])\n # print(\"\")\n\n # print(\"There are {} protocols\".format(len(set([r['base'] for r in results]))))\n\n\n\n #intdistplot([d['quantifiers'] for d in descs], axlabel=\"quantifier count\", kde=False).get_figure().savefig(\"quantifier_distribution.png\")\n \n # fig = plt.figure(figsize=(8,6))\n # ax = sns.countplot(data = pd.DataFrame([d['base'].replace(\"_\", \" \").replace(\"-\", \" \") for d in descs]), color=\"c\", y = 0, orient='h')\n # ax.set_ylabel(\"protocol\")\n # ax.set_xlabel(\"number of conjuncts\")\n # plt.subplots_adjust(left=0.5)\n # fig.suptitle(\"Distribution of conjucts over protocols\")\n # plt.savefig(\"conjunct_distribution.png\")\n _print(f\"Total CPU hours {sum(r['stats']['total_time'] for r in results)/3600:.0f}\\n\")\n\n for r in results:\n if r['killed']: continue\n if r['stats']['formula'] == 'false':\n print (r['base'], r['conjecture'])\n\n \n s: typing.Counter[str] = Counter()\n f: typing.Counter[str] = Counter()\n k: typing.Counter[str] = Counter()\n for r in results:\n if r['success']:\n s[r['base']] += 1\n elif r['killed']:\n k[r['base']] += 1\n else:\n 
f[r['base']] += 1\n\n\n # fig = plt.figure(figsize=(8,6))\n # plt.subplots_adjust(left=0.5)\n # ax = plt.axes()\n # labels = list(sorted(set(s.keys()) | set(f.keys()) | set(k.keys())))\n # labels.reverse()\n # plt.barh(range(len(labels)), list(1 for l in labels), color='#319b7c', linewidth=0)\n # plt.barh(range(len(labels)), list(((k[l]+f[l])/float(s[l]+f[l]+k[l]) for l in labels)), color='#fdce4b', linewidth=0)\n # plt.barh(range(len(labels)), list((k[l]/float(s[l]+f[l]+k[l]) for l in labels)), color='#e44033', linewidth=0)\n # plt.yticks(range(len(labels)), labels)\n # plt.xlim(0,1)\n # ax.spines['top'].set_visible(False)\n # ax.spines['right'].set_visible(False)\n # ax.spines['bottom'].set_visible(False)\n # ax.spines['left'].set_visible(False)\n # fig.suptitle(\"Success rate by protocol (normalized)\")\n # plt.savefig(\"success_by_protocol.png\")\n\n missing_experiments = set((d['base'], d['conjecture']) for d in descs) - set((d['base'], d['conjecture']) for d in results)\n if len(missing_experiments) > 0:\n _print(f\"Missing {len(missing_experiments)} results from benchmark\")\n _print(\"Results count: \", len(results), \"{}/{}/{} succ/kill/fail\".format(sum(s.values()),sum(k.values()), sum(f.values())))\n _print(f\"Success rate: {sum(s.values())/len(results)*100.0:0.1f}\" )\n \n # print (\"\\nProb Succ Killed Failed\")\n # for l in labels:\n # print(l, s[l], k[l], f[l])\n # print(\"\")\n\n fig = plt.figure(figsize=(6,4))\n \n s = Counter()\n f = Counter()\n k = Counter()\n for r in results:\n golden_quant_count = desc_by_id[r['base'], r['conjecture']]['quantifiers']\n if r['success']:\n s[golden_quant_count] += 1\n elif r['killed']:\n k[golden_quant_count] += 1\n else:\n f[golden_quant_count] += 1\n\n ax = plt.axes()\n labels = list(sorted(set(s.keys()) | set(k.keys()) | set(f.keys())))\n plt.bar(range(len(labels)), list(k[l]+f[l]+s[l] for l in labels), edgecolor='0', color='#FFFFFF', linewidth=0.5, clip_on=False)\n plt.bar(range(len(labels)), list(k[l]+f[l] for l in labels), color='#444444',edgecolor='#444444', linewidth=0.5)\n #plt.bar(range(len(labels)), list(k[l] for l in labels), color='#e44033', linewidth=0)\n plt.xticks(range(len(labels)), labels)\n plt.ylim(0,None)\n \n plt.xlabel(\"Number of quantifiers in golden formula\")\n ax.tick_params(axis='y', left=True, width=0.5)\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n ax.legend(['Success', 'Failure'], frameon=False)\n #fig.suptitle(\"Quantifier conjunct distribution with success rate\")\n plt.savefig(\"success_by_quantifier_count.eps\", bbox_inches='tight')\n plt.savefig(\"success_by_quantifier_count.png\", bbox_inches='tight')\n\n _print(\"\\nQuant. 
@ success @ killed @ failed\")\n for l in labels:\n _print (l, \"@\", s[l],\"@\", k[l],\"@\", f[l])\n _print(\"\\n\")\n\n fig = plt.figure(figsize=(6,4))\n times = []\n for r in results:\n if r['success']:\n times.append(r['stats']['total_time'])\n else:\n times.append(float('Inf'))\n times.sort()\n ax = plt.axes()\n plt.plot([x+0.5 for x in range(len(times))], times, color='black')\n plt.yscale(\"log\")\n plt.xlim(0,len(times))\n plt.ylim(1,3600)\n plt.ylabel(\"Time to learn (sec)\")\n plt.xlabel(\"Conjecture (ordinal)\")\n #fig.suptitle(\"Ordinal chart of time to learn conjuncts\")\n plt.savefig(\"ordinal_learning_times.eps\", bbox_inches='tight')\n plt.savefig(\"ordinal_learning_times.png\", bbox_inches='tight')\n\n # fig = plt.figure(figsize=(6,4))\n # xs = []\n # ys = []\n # for r in results:\n # if 'stats' in r:\n # xs.append(max(0.001, r['stats']['counterexample_time']/60.0))\n # ys.append(max(0.001, r['stats']['separation_time']/60.0))\n # times.sort()\n # ax = plt.axes()\n # #ax.set_aspect('equal', 'datalim')\n # plt.scatter(xs, ys, color='black')\n # plt.yscale(\"log\")\n # plt.ylim(0.001,10)\n # plt.xscale(\"log\")\n # plt.xlim(0.001,10)\n # plt.ylabel(\"Separation\")\n # plt.xlabel(\"Counterexample\")\n # #fig.suptitle(\"Ordinal chart of time to learn conjuncts\")\n # plt.savefig(\"time_scatter.eps\", bbox_inches='tight')\n # plt.savefig(\"time_scatter.png\", bbox_inches='tight')\n\n c_to = 0\n s_to = 0\n for r in results:\n if 'stats' in r:\n if r['stats']['counterexample_time'] > r['timeout'] - 5:\n c_to += 1\n if r['stats']['separation_time'] > r['timeout'] - 5:\n s_to += 1\n _print(f\"counterexample timeout: {c_to}, separation timeout: {s_to}\")\n\n\n\n # fig = plt.figure(figsize=(6,4))\n # xs = []\n # ys = []\n # for r in results:\n # if 'stats' in r:\n # xs.append(r['stats']['separation_time'])\n # ys.append(r['stats']['matrix_time']/max(0.0001,r['stats']['separation_time']))\n # times.sort()\n # ax = plt.axes()\n # #ax.set_aspect('equal', 'datalim')\n # plt.scatter(xs, ys, color='black')\n # plt.ylim(0,1)\n # #plt.xscale(\"log\")\n # plt.ylabel(\"Matrix Fraction\")\n # plt.xlabel(\"Separation Time\")\n # #fig.suptitle(\"Ordinal chart of time to learn conjuncts\")\n # plt.savefig(\"matrix_percentage.eps\", bbox_inches='tight')\n # plt.savefig(\"matrix_percentage.png\", bbox_inches='tight')\n\n m_heavy = 0\n m_light = 0\n lower_limit = 200\n _print(\"\\nFormula with hard to infer matrices:\")\n for r in results:\n if 'stats' in r:\n if r['stats']['separation_time'] > lower_limit:\n if r['stats']['matrix_time'] > r['stats']['separation_time']*0.5:\n m_heavy += 1\n print(r['success'],\"\\t\", desc_of(r)['golden_formula'])\n else:\n m_light += 1\n _print(f\"For examples taking > {lower_limit} sec\")\n _print(f\"matrix >50%: {m_heavy}, matrix <=50%: {m_light}\")\n\n\n errors = []\n for r in results:\n if r['killed'] and d['max_term_depth'] <= 1:\n qc = desc_of(r)['quantifiers']\n gold = desc_of(r)['golden_formula']\n errors.append((qc, gold, r['base'] + \"-\" + r['conjecture']))\n errors.sort()\n _print(\"\\nKilled Conjuncts:\")\n for (q, gold, name) in errors:\n _print(name, q, gold)\n \n errors2 = []\n for r in results:\n if not r['killed'] and not r['success']:\n qc = desc_of(r)['quantifiers']\n gold = desc_of(r)['golden_formula']\n if 'stats' in r:\n x = min(1, r['stats']['counterexample_time']/float(r['timeout']))\n else:\n x = 0.0\n errors2.append((qc, gold, r['base'] + \"-\" + r['conjecture'], x, r))\n errors2.sort()\n _print(\"\\nFailed Conjuncts(counter frac, matrix 
frac, quants, name, error):\")\n # for (q, gold, name, x, r) in errors:\n # print(f\"{x:0.2f} {name}\",\"@\", q, \"@\", gold)\n for (q, gold, name, x, r) in errors2:\n if 'stats' in r:\n c_frac = min(1, r['stats']['counterexample_time']/float(r['timeout']))\n m_frac = min(1, r['stats']['matrix_time']/max(0.001, r['stats']['separation_time']))\n error = r['stats']['error']\n else:\n c_frac = 0.0\n m_frac = 0.0\n error = \"?\"\n _print(f\"{c_frac:0.2f}\\t{m_frac:0.2f}\\t{q}\\t{name}\\t{error}\\t{r['stats']['formula_quantifiers']}\")\n \n\n _print(\"\\nFailed Conjuncts(counter frac, matrix frac, quants, name, error):\")\n # for (q, gold, name, x, r) in errors:\n # print(f\"{x:0.2f} {name}\",\"@\", q, \"@\", gold)\n cc: typing.Counter[Tuple[bool, bool]] = Counter()\n for (q, gold, name, x, r) in errors2:\n if 'stats' in r:\n c_frac = min(1, r['stats']['counterexample_time']/float(r['timeout']))\n m_frac = min(1, r['stats']['matrix_time']/max(0.001, r['stats']['separation_time']))\n error = r['stats']['error']\n else:\n c_frac = 0.0\n m_frac = 0.0\n error = \"?\"\n reason_c = c_frac >= 0.99 or error.startswith(\"Z3\")\n reason_m = not reason_c and m_frac >= 0.95\n cc[(not reason_c, not reason_m)] += 1\n if not reason_c and not reason_m:\n _print(f\"{c_frac:0.2f}\\t{m_frac:0.2f}\\t{q}\\t{name}\\t{error}\")\n for kk,v in cc.items():\n (counter, matrix) = kk\n _print(f\"{('c < 0.99' if counter else 'c >= 0.99')}, {('m < 0.95' if matrix else 'm >= 0.95')}: {v}\") \n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
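Note on the row above: in the "Killed Conjuncts" loop of `make_charts.py`, the condition `r['killed'] and d['max_term_depth'] <= 1` reads `d`, the leftover variable from the earlier `for d in descs:` loop, so every killed result is tested against the same stale description. A corrected loop would presumably consult the result's own description; this sketch assumes the intent was `desc_of(r)` and that `results`/`desc_of` are as defined in the script:

```python
# Hypothetical fix: look up each killed result's own description instead
# of the stale loop variable `d` left over from an earlier loop.
def killed_conjuncts(results, desc_of):
    errors = []
    for r in results:
        desc = desc_of(r)
        if r['killed'] and desc['max_term_depth'] <= 1:
            errors.append((desc['quantifiers'], desc['golden_formula'],
                           r['base'] + '-' + r['conjecture']))
    return sorted(errors)
```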
thinhnd2104/dask | [
"995c9dcd47ec040564a66d399fffea18e3dac597"
] | [
"dask/dataframe/shuffle.py"
] | [
"import contextlib\nimport logging\nimport math\nimport shutil\nimport tempfile\nimport uuid\n\nimport numpy as np\nimport pandas as pd\nimport tlz as toolz\n\nfrom .. import base, config\nfrom ..base import compute, compute_as_if_collection, is_dask_collection, tokenize\nfrom ..highlevelgraph import HighLevelGraph\nfrom ..layers import ShuffleLayer, SimpleShuffleLayer\nfrom ..sizeof import sizeof\nfrom ..utils import M, digit\nfrom . import methods\nfrom .core import DataFrame, Series, _Frame, map_partitions, new_dd_object\nfrom .dispatch import group_split_dispatch, hash_object_dispatch\n\nlogger = logging.getLogger(__name__)\n\n\ndef _calculate_divisions(\n df,\n partition_col,\n repartition,\n npartitions,\n upsample=1.0,\n partition_size=128e6,\n):\n \"\"\"\n Utility function to calculate divisions for calls to `map_partitions`\n \"\"\"\n sizes = df.map_partitions(sizeof) if repartition else []\n divisions = partition_col._repartition_quantiles(npartitions, upsample=upsample)\n mins = partition_col.map_partitions(M.min)\n maxes = partition_col.map_partitions(M.max)\n divisions, sizes, mins, maxes = base.compute(divisions, sizes, mins, maxes)\n divisions = methods.tolist(divisions)\n if type(sizes) is not list:\n sizes = methods.tolist(sizes)\n mins = methods.tolist(mins)\n maxes = methods.tolist(maxes)\n\n empty_dataframe_detected = pd.isnull(divisions).all()\n if repartition or empty_dataframe_detected:\n total = sum(sizes)\n npartitions = max(math.ceil(total / partition_size), 1)\n npartitions = min(npartitions, df.npartitions)\n n = len(divisions)\n try:\n divisions = np.interp(\n x=np.linspace(0, n - 1, npartitions + 1),\n xp=np.linspace(0, n - 1, n),\n fp=divisions,\n ).tolist()\n except (TypeError, ValueError): # str type\n indexes = np.linspace(0, n - 1, npartitions + 1).astype(int)\n divisions = [divisions[i] for i in indexes]\n\n mins = remove_nans(mins)\n maxes = remove_nans(maxes)\n if pd.api.types.is_categorical_dtype(partition_col.dtype):\n dtype = partition_col.dtype\n mins = pd.Categorical(mins, dtype=dtype).codes.tolist()\n maxes = pd.Categorical(maxes, dtype=dtype).codes.tolist()\n\n return divisions, mins, maxes\n\n\ndef sort_values(\n df,\n by,\n npartitions=None,\n ascending=True,\n upsample=1.0,\n partition_size=128e6,\n **kwargs,\n):\n \"\"\"See DataFrame.sort_values for docstring\"\"\"\n if not isinstance(by, str):\n # support [\"a\"] as input\n if isinstance(by, list) and len(by) == 1 and isinstance(by[0], str):\n by = by[0]\n else:\n raise NotImplementedError(\n \"Dataframe only supports sorting by a single column which must \"\n \"be passed as a string or a list of a single string.\\n\"\n \"You passed %s\" % str(by)\n )\n if npartitions == \"auto\":\n repartition = True\n npartitions = max(100, df.npartitions)\n else:\n if npartitions is None:\n npartitions = df.npartitions\n repartition = False\n\n sort_by_col = df[by]\n\n divisions, mins, maxes = _calculate_divisions(\n df, sort_by_col, repartition, npartitions, upsample, partition_size\n )\n\n if (\n mins == sorted(mins, reverse=not ascending)\n and maxes == sorted(maxes, reverse=not ascending)\n and all(\n mx < mn\n for mx, mn in zip(\n maxes[:-1] if ascending else maxes[1:],\n mins[1:] if ascending else mins[:-1],\n )\n )\n and npartitions == df.npartitions\n ):\n # divisions are in the right place\n return df.map_partitions(M.sort_values, by, ascending=ascending)\n\n df = rearrange_by_divisions(df, by, divisions, ascending=ascending)\n df = df.map_partitions(M.sort_values, by, ascending=ascending)\n 
return df\n\n\ndef set_index(\n df,\n index,\n npartitions=None,\n shuffle=None,\n compute=False,\n drop=True,\n upsample=1.0,\n divisions=None,\n partition_size=128e6,\n **kwargs,\n):\n \"\"\"See _Frame.set_index for docstring\"\"\"\n if isinstance(index, Series) and index._name == df.index._name:\n return df\n if isinstance(index, (DataFrame, tuple, list)):\n # Accept [\"a\"], but not [[\"a\"]]\n if (\n isinstance(index, list)\n and len(index) == 1\n and not isinstance(index[0], list) # if index = [[\"a\"]], leave it that way\n ):\n index = index[0]\n else:\n raise NotImplementedError(\n \"Dask dataframe does not yet support multi-indexes.\\n\"\n \"You tried to index with this index: %s\\n\"\n \"Indexes must be single columns only.\" % str(index)\n )\n\n if npartitions == \"auto\":\n repartition = True\n npartitions = max(100, df.npartitions)\n else:\n if npartitions is None:\n npartitions = df.npartitions\n repartition = False\n\n if not isinstance(index, Series):\n index2 = df[index]\n else:\n index2 = index\n\n if divisions is None:\n divisions, mins, maxes = _calculate_divisions(\n df, index2, repartition, npartitions, upsample, partition_size\n )\n\n if (\n mins == sorted(mins)\n and maxes == sorted(maxes)\n and all(mx < mn for mx, mn in zip(maxes[:-1], mins[1:]))\n and npartitions == df.npartitions\n ):\n divisions = mins + [maxes[-1]]\n result = set_sorted_index(df, index, drop=drop, divisions=divisions)\n return result.map_partitions(M.sort_index)\n\n return set_partition(\n df, index, divisions, shuffle=shuffle, drop=drop, compute=compute, **kwargs\n )\n\n\ndef remove_nans(divisions):\n \"\"\"Remove nans from divisions\n\n These sometime pop up when we call min/max on an empty partition\n\n Examples\n --------\n >>> remove_nans((np.nan, 1, 2))\n [1, 1, 2]\n >>> remove_nans((1, np.nan, 2))\n [1, 2, 2]\n >>> remove_nans((1, 2, np.nan))\n [1, 2, 2]\n \"\"\"\n divisions = list(divisions)\n\n for i in range(len(divisions) - 2, -1, -1):\n if pd.isnull(divisions[i]):\n divisions[i] = divisions[i + 1]\n\n for i in range(len(divisions) - 1, -1, -1):\n if not pd.isnull(divisions[i]):\n for j in range(i + 1, len(divisions)):\n divisions[j] = divisions[i]\n break\n\n return divisions\n\n\ndef set_partition(\n df, index, divisions, max_branch=32, drop=True, shuffle=None, compute=None\n):\n \"\"\"Group DataFrame by index\n\n Sets a new index and partitions data along that index according to\n divisions. Divisions are often found by computing approximate quantiles.\n The function ``set_index`` will do both of these steps.\n\n Parameters\n ----------\n df: DataFrame/Series\n Data that we want to re-partition\n index: string or Series\n Column to become the new index\n divisions: list\n Values to form new divisions between partitions\n drop: bool, default True\n Whether to delete columns to be used as the new index\n shuffle: str (optional)\n Either 'disk' for an on-disk shuffle or 'tasks' to use the task\n scheduling framework. Use 'disk' if you are on a single machine\n and 'tasks' if you are on a distributed cluster.\n max_branch: int (optional)\n If using the task-based shuffle, the amount of splitting each\n partition undergoes. Increase this for fewer copies but more\n scheduler overhead.\n\n See Also\n --------\n set_index\n shuffle\n partd\n \"\"\"\n meta = df._meta._constructor_sliced([0])\n if isinstance(divisions, tuple):\n # pd.isna considers tuples to be scalars. 
Convert to a list.\n divisions = list(divisions)\n\n if np.isscalar(index):\n dtype = df[index].dtype\n else:\n dtype = index.dtype\n\n if pd.isna(divisions).any() and pd.api.types.is_integer_dtype(dtype):\n # Can't construct a Series[int64] when any / all of the divisions are NaN.\n divisions = df._meta._constructor_sliced(divisions)\n else:\n divisions = df._meta._constructor_sliced(divisions, dtype=dtype)\n\n if np.isscalar(index):\n partitions = df[index].map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions)\n else:\n partitions = index.map_partitions(\n set_partitions_pre, divisions=divisions, meta=meta\n )\n df2 = df.assign(_partitions=partitions, _index=index)\n\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n compute=compute,\n ignore_index=True,\n )\n\n if np.isscalar(index):\n df4 = df3.map_partitions(\n set_index_post_scalar,\n index_name=index,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n else:\n df4 = df3.map_partitions(\n set_index_post_series,\n index_name=index.name,\n drop=drop,\n column_dtype=df.columns.dtype,\n )\n\n df4.divisions = methods.tolist(divisions)\n\n return df4.map_partitions(M.sort_index)\n\n\ndef shuffle(\n df,\n index,\n shuffle=None,\n npartitions=None,\n max_branch=32,\n ignore_index=False,\n compute=None,\n):\n \"\"\"Group DataFrame by index\n\n Hash grouping of elements. After this operation all elements that have\n the same index will be in the same partition. Note that this requires\n full dataset read, serialization and shuffle. This is expensive. If\n possible you should avoid shuffles.\n\n This does not preserve a meaningful index/partitioning scheme. This is not\n deterministic if done in parallel.\n\n See Also\n --------\n set_index\n set_partition\n shuffle_disk\n \"\"\"\n list_like = pd.api.types.is_list_like(index) and not is_dask_collection(index)\n if shuffle == \"tasks\" and (isinstance(index, str) or list_like):\n # Avoid creating the \"_partitions\" column if possible.\n # We currently do this if the user is passing in\n # specific column names (and shuffle == \"tasks\").\n if isinstance(index, str):\n index = [index]\n else:\n index = list(index)\n nset = set(index)\n if nset & set(df.columns) == nset:\n return rearrange_by_column(\n df,\n index,\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n ignore_index=ignore_index,\n compute=compute,\n )\n\n if not isinstance(index, _Frame):\n index = df._select_columns_or_index(index)\n elif hasattr(index, \"to_frame\"):\n # If this is an index, we should still convert to a\n # DataFrame. 
Otherwise, the hashed values of a column\n # selection will not match (important when merging).\n index = index.to_frame()\n\n partitions = index.map_partitions(\n partitioning_index,\n npartitions=npartitions or df.npartitions,\n meta=df._meta._constructor_sliced([0]),\n transform_divisions=False,\n )\n df2 = df.assign(_partitions=partitions)\n df2._meta.index.name = df._meta.index.name\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n npartitions=npartitions,\n max_branch=max_branch,\n shuffle=shuffle,\n compute=compute,\n ignore_index=ignore_index,\n )\n del df3[\"_partitions\"]\n return df3\n\n\ndef rearrange_by_divisions(\n df,\n column,\n divisions,\n max_branch=None,\n shuffle=None,\n ascending=True,\n):\n \"\"\"Shuffle dataframe so that column separates along divisions\"\"\"\n divisions = df._meta._constructor_sliced(divisions)\n meta = df._meta._constructor_sliced([0])\n # Assign target output partitions to every row\n partitions = df[column].map_partitions(\n set_partitions_pre, divisions=divisions, ascending=ascending, meta=meta\n )\n df2 = df.assign(_partitions=partitions)\n\n # Perform shuffle\n df3 = rearrange_by_column(\n df2,\n \"_partitions\",\n max_branch=max_branch,\n npartitions=len(divisions) - 1,\n shuffle=shuffle,\n )\n del df3[\"_partitions\"]\n return df3\n\n\ndef rearrange_by_column(\n df,\n col,\n npartitions=None,\n max_branch=None,\n shuffle=None,\n compute=None,\n ignore_index=False,\n):\n shuffle = shuffle or config.get(\"shuffle\", None) or \"disk\"\n\n # if the requested output partitions < input partitions\n # we repartition first as shuffling overhead is\n # proportionate to the number of input partitions\n\n if npartitions is not None and npartitions < df.npartitions:\n df = df.repartition(npartitions=npartitions)\n\n if shuffle == \"disk\":\n return rearrange_by_column_disk(df, col, npartitions, compute=compute)\n elif shuffle == \"tasks\":\n df2 = rearrange_by_column_tasks(\n df, col, max_branch, npartitions, ignore_index=ignore_index\n )\n if ignore_index:\n df2._meta = df2._meta.reset_index(drop=True)\n return df2\n else:\n raise NotImplementedError(\"Unknown shuffle method %s\" % shuffle)\n\n\nclass maybe_buffered_partd:\n \"\"\"\n If serialized, will return non-buffered partd. 
Otherwise returns a buffered partd\n \"\"\"\n\n def __init__(self, buffer=True, tempdir=None):\n self.tempdir = tempdir or config.get(\"temporary_directory\", None)\n self.buffer = buffer\n self.compression = config.get(\"dataframe.shuffle-compression\", None)\n\n def __reduce__(self):\n if self.tempdir:\n return (maybe_buffered_partd, (False, self.tempdir))\n else:\n return (maybe_buffered_partd, (False,))\n\n def __call__(self, *args, **kwargs):\n import partd\n\n path = tempfile.mkdtemp(suffix=\".partd\", dir=self.tempdir)\n\n try:\n partd_compression = (\n getattr(partd.compressed, self.compression)\n if self.compression\n else None\n )\n except AttributeError as e:\n raise ImportError(\n \"Not able to import and load {0} as compression algorithm.\"\n \"Please check if the library is installed and supported by Partd.\".format(\n self.compression\n )\n ) from e\n file = partd.File(path)\n partd.file.cleanup_files.append(path)\n # Envelope partd file with compression, if set and available\n if partd_compression:\n file = partd_compression(file)\n if self.buffer:\n return partd.PandasBlocks(partd.Buffer(partd.Dict(), file))\n else:\n return partd.PandasBlocks(file)\n\n\ndef rearrange_by_column_disk(df, column, npartitions=None, compute=False):\n \"\"\"Shuffle using local disk\n\n See Also\n --------\n rearrange_by_column_tasks:\n Same function, but using tasks rather than partd\n Has a more informative docstring\n \"\"\"\n if npartitions is None:\n npartitions = df.npartitions\n\n token = tokenize(df, column, npartitions)\n always_new_token = uuid.uuid1().hex\n\n p = (\"zpartd-\" + always_new_token,)\n dsk1 = {p: (maybe_buffered_partd(),)}\n\n # Partition data on disk\n name = \"shuffle-partition-\" + always_new_token\n dsk2 = {\n (name, i): (shuffle_group_3, key, column, npartitions, p)\n for i, key in enumerate(df.__dask_keys__())\n }\n\n dependencies = []\n if compute:\n graph = HighLevelGraph.merge(df.dask, dsk1, dsk2)\n graph = HighLevelGraph.from_collections(name, graph, dependencies=[df])\n keys = [p, sorted(dsk2)]\n pp, values = compute_as_if_collection(DataFrame, graph, keys)\n dsk1 = {p: pp}\n dsk2 = dict(zip(sorted(dsk2), values))\n else:\n dependencies.append(df)\n\n # Barrier\n barrier_token = \"barrier-\" + always_new_token\n dsk3 = {barrier_token: (barrier, list(dsk2))}\n\n # Collect groups\n name = \"shuffle-collect-\" + token\n dsk4 = {\n (name, i): (collect, p, i, df._meta, barrier_token) for i in range(npartitions)\n }\n\n divisions = (None,) * (npartitions + 1)\n\n layer = toolz.merge(dsk1, dsk2, dsk3, dsk4)\n graph = HighLevelGraph.from_collections(name, layer, dependencies=dependencies)\n return new_dd_object(graph, name, df._meta, divisions)\n\n\ndef _noop(x, cleanup_token):\n \"\"\"\n A task that does nothing.\n \"\"\"\n return x\n\n\ndef rearrange_by_column_tasks(\n df, column, max_branch=32, npartitions=None, ignore_index=False\n):\n \"\"\"Order divisions of DataFrame so that all values within column(s) align\n\n This enacts a task-based shuffle. It contains most of the tricky logic\n around the complex network of tasks. Typically before this function is\n called a new column, ``\"_partitions\"`` has been added to the dataframe,\n containing the output partition number of every row. This function\n produces a new dataframe where every row is in the proper partition. It\n accomplishes this by splitting each input partition into several pieces,\n and then concatenating pieces from different input partitions into output\n partitions. 
If there are enough partitions then it does this work in\n stages to avoid scheduling overhead.\n\n Lets explain the motivation for this further. Imagine that we have 1000\n input partitions and 1000 output partitions. In theory we could split each\n input into 1000 pieces, and then move the 1 000 000 resulting pieces\n around, and then concatenate them all into 1000 output groups. This would\n be fine, but the central scheduling overhead of 1 000 000 tasks would\n become a bottleneck. Instead we do this in stages so that we split each of\n the 1000 inputs into 30 pieces (we now have 30 000 pieces) move those\n around, concatenate back down to 1000, and then do the same process again.\n This has the same result as the full transfer, but now we've moved data\n twice (expensive) but done so with only 60 000 tasks (cheap).\n\n Note that the `column` input may correspond to a list of columns (rather\n than just a single column name). In this case, the `shuffle_group` and\n `shuffle_group_2` functions will use hashing to map each row to an output\n partition. This approach may require the same rows to be hased multiple\n times, but avoids the need to assign a new \"_partitions\" column.\n\n Parameters\n ----------\n df: dask.dataframe.DataFrame\n column: str or list\n A column name on which we want to split, commonly ``\"_partitions\"``\n which is assigned by functions upstream. This could also be a list of\n columns (in which case shuffle_group will create a hash array/column).\n max_branch: int\n The maximum number of splits per input partition. Defaults to 32.\n If there are more partitions than this then the shuffling will occur in\n stages in order to avoid creating npartitions**2 tasks\n Increasing this number increases scheduling overhead but decreases the\n number of full-dataset transfers that we have to make.\n npartitions: Optional[int]\n The desired number of output partitions\n\n Returns\n -------\n df3: dask.dataframe.DataFrame\n\n See also\n --------\n rearrange_by_column_disk: same operation, but uses partd\n rearrange_by_column: parent function that calls this or rearrange_by_column_disk\n shuffle_group: does the actual splitting per-partition\n \"\"\"\n\n max_branch = max_branch or 32\n\n if (npartitions or df.npartitions) <= max_branch:\n # We are creating a small number of output partitions.\n # No need for staged shuffling. 
Staged shuffling will\n # sometimes require extra work/communication in this case.\n token = tokenize(df, column, npartitions)\n shuffle_name = f\"simple-shuffle-{token}\"\n npartitions = npartitions or df.npartitions\n shuffle_layer = SimpleShuffleLayer(\n shuffle_name,\n column,\n npartitions,\n df.npartitions,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n shuffle_name, shuffle_layer, dependencies=[df]\n )\n return new_dd_object(graph, shuffle_name, df._meta, [None] * (npartitions + 1))\n\n n = df.npartitions\n stages = int(math.ceil(math.log(n) / math.log(max_branch)))\n if stages > 1:\n k = int(math.ceil(n ** (1 / stages)))\n else:\n k = n\n\n inputs = [tuple(digit(i, j, k) for j in range(stages)) for i in range(k ** stages)]\n\n npartitions_orig = df.npartitions\n token = tokenize(df, stages, column, n, k)\n for stage in range(stages):\n stage_name = f\"shuffle-{stage}-{token}\"\n stage_layer = ShuffleLayer(\n stage_name,\n column,\n inputs,\n stage,\n npartitions,\n n,\n k,\n ignore_index,\n df._name,\n df._meta,\n )\n graph = HighLevelGraph.from_collections(\n stage_name, stage_layer, dependencies=[df]\n )\n df = new_dd_object(graph, stage_name, df._meta, df.divisions)\n\n if npartitions is not None and npartitions != npartitions_orig:\n token = tokenize(df, npartitions)\n repartition_group_token = \"repartition-group-\" + token\n\n dsk = {\n (repartition_group_token, i): (\n shuffle_group_2,\n k,\n column,\n ignore_index,\n npartitions,\n )\n for i, k in enumerate(df.__dask_keys__())\n }\n\n repartition_get_name = \"repartition-get-\" + token\n\n for p in range(npartitions):\n dsk[(repartition_get_name, p)] = (\n shuffle_group_get,\n (repartition_group_token, p % npartitions_orig),\n p,\n )\n\n graph2 = HighLevelGraph.from_collections(\n repartition_get_name, dsk, dependencies=[df]\n )\n df2 = new_dd_object(\n graph2, repartition_get_name, df._meta, [None] * (npartitions + 1)\n )\n else:\n df2 = df\n df2.divisions = (None,) * (npartitions_orig + 1)\n\n return df2\n\n\n########################################################\n# Various convenience functions to be run by the above #\n########################################################\n\n\ndef partitioning_index(df, npartitions):\n \"\"\"\n Computes a deterministic index mapping each record to a partition.\n\n Identical rows are mapped to the same partition.\n\n Parameters\n ----------\n df : DataFrame/Series/Index\n npartitions : int\n The number of partitions to group into.\n\n Returns\n -------\n partitions : ndarray\n An array of int64 values mapping each record to a partition.\n \"\"\"\n return hash_object_dispatch(df, index=False) % int(npartitions)\n\n\ndef barrier(args):\n list(args)\n return 0\n\n\ndef cleanup_partd_files(p, keys):\n \"\"\"\n Cleanup the files in a partd.File dataset.\n\n Parameters\n ----------\n p : partd.Interface\n File or Encode wrapping a file should be OK.\n keys: List\n Just for scheduling purposes, not actually used.\n \"\"\"\n import partd\n\n if isinstance(p, partd.Encode):\n maybe_file = p.partd\n else:\n maybe_file\n\n if isinstance(maybe_file, partd.File):\n path = maybe_file.path\n else:\n path = None\n\n if path:\n shutil.rmtree(path, ignore_errors=True)\n\n\ndef collect(p, part, meta, barrier_token):\n \"\"\"Collect partitions from partd, yield dataframes\"\"\"\n with ensure_cleanup_on_exception(p):\n res = p.get(part)\n return res if len(res) > 0 else meta\n\n\ndef set_partitions_pre(s, divisions, ascending=True):\n try:\n if ascending:\n 
partitions = divisions.searchsorted(s, side=\"right\") - 1\n else:\n partitions = len(divisions) - divisions.searchsorted(s, side=\"right\") - 1\n except TypeError:\n # When `searchsorted` fails with `TypeError`, it may be\n # caused by nulls in `s`. Try again with the null-values\n # explicitly mapped to the first partition.\n partitions = np.empty(len(s), dtype=\"int32\")\n partitions[s.isna()] = 0\n not_null = s.notna()\n if ascending:\n partitions[not_null] = divisions.searchsorted(s[not_null], side=\"right\") - 1\n else:\n partitions[not_null] = (\n len(divisions) - divisions.searchsorted(s[not_null], side=\"right\") - 1\n )\n partitions[(s >= divisions.iloc[-1]).values] = (\n len(divisions) - 2 if ascending else 0\n )\n return partitions\n\n\ndef shuffle_group_2(df, cols, ignore_index, nparts):\n if not len(df):\n return {}, df\n\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]].astype(np.int32)\n else:\n ind = (\n hash_object_dispatch(df[cols] if cols else df, index=False) % int(nparts)\n ).astype(np.int32)\n\n n = ind.max() + 1\n result2 = group_split_dispatch(df, ind.values.view(), n, ignore_index=ignore_index)\n return result2, df.iloc[:0]\n\n\ndef shuffle_group_get(g_head, i):\n g, head = g_head\n if i in g:\n return g[i]\n else:\n return head\n\n\ndef shuffle_group(df, cols, stage, k, npartitions, ignore_index, nfinal):\n \"\"\"Splits dataframe into groups\n\n The group is determined by their final partition, and which stage we are in\n in the shuffle\n\n Parameters\n ----------\n df: DataFrame\n cols: str or list\n Column name(s) on which to split the dataframe. If ``cols`` is not\n \"_partitions\", hashing will be used to determine target partition\n stage: int\n We shuffle dataframes with many partitions we in a few stages to avoid\n a quadratic number of tasks. This number corresponds to which stage\n we're in, starting from zero up to some small integer\n k: int\n Desired number of splits from this dataframe\n npartition: int\n Total number of output partitions for the full dataframe\n nfinal: int\n Total number of output partitions after repartitioning\n\n Returns\n -------\n out: Dict[int, DataFrame]\n A dictionary mapping integers in {0..k} to dataframes such that the\n hash values of ``df[col]`` are well partitioned.\n \"\"\"\n if isinstance(cols, str):\n cols = [cols]\n\n if cols and cols[0] == \"_partitions\":\n ind = df[cols[0]]\n else:\n ind = hash_object_dispatch(df[cols] if cols else df, index=False)\n if nfinal and nfinal != npartitions:\n ind = ind % int(nfinal)\n\n c = ind.values\n typ = np.min_scalar_type(npartitions * 2)\n\n c = np.mod(c, npartitions).astype(typ, copy=False)\n np.floor_divide(c, k ** stage, out=c)\n np.mod(c, k, out=c)\n\n return group_split_dispatch(df, c, k, ignore_index=ignore_index)\n\n\[email protected]\ndef ensure_cleanup_on_exception(p):\n \"\"\"Ensure a partd.File is cleaned up.\n\n We have several tasks referring to a `partd.File` instance. We want to\n ensure that the file is cleaned up if and only if there's an exception\n in the tasks using the `partd.File`.\n \"\"\"\n try:\n yield\n except Exception:\n # the function (e.g. 
shuffle_group_3) had an internal exception.\n # We'll cleanup our temporary files and re-raise.\n try:\n p.drop()\n except Exception:\n logger.exception(\"ignoring exception in ensure_cleanup_on_exception\")\n raise\n\n\ndef shuffle_group_3(df, col, npartitions, p):\n with ensure_cleanup_on_exception(p):\n g = df.groupby(col)\n d = {i: g.get_group(i) for i in g.groups}\n p.append(d, fsync=True)\n\n\ndef set_index_post_scalar(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(index_name, drop=drop)\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef set_index_post_series(df, index_name, drop, column_dtype):\n df2 = df.drop(\"_partitions\", axis=1).set_index(\"_index\", drop=True)\n df2.index.name = index_name\n df2.columns = df2.columns.astype(column_dtype)\n return df2\n\n\ndef drop_overlap(df, index):\n return df.drop(index) if index in df.index else df\n\n\ndef get_overlap(df, index):\n return df.loc[[index]] if index in df.index else df._constructor()\n\n\ndef fix_overlap(ddf, overlap):\n \"\"\"Ensures that the upper bound on each partition of ddf (except the last) is exclusive\"\"\"\n name = \"fix-overlap-\" + tokenize(ddf, overlap)\n n = len(ddf.divisions) - 1\n dsk = {(name, i): (ddf._name, i) for i in range(n)}\n\n frames = []\n for i in overlap:\n\n # `frames` is a list of data from previous partitions that we may want to\n # move to partition i. Here, we add \"overlap\" from the previous partition\n # (i-1) to this list.\n frames.append((get_overlap, (ddf._name, i - 1), ddf.divisions[i]))\n\n # Make sure that any data added from partition i-1 to `frames` is removed\n # from partition i-1.\n dsk[(name, i - 1)] = (drop_overlap, dsk[(name, i - 1)], ddf.divisions[i])\n\n # We do not want to move \"overlap\" from the previous partition (i-1) into\n # this partition (i) if the data from this partition will need to be moved\n # to the next partition (i+1) anyway. 
If we concatenate data too early,\n # we may lose rows (https://github.com/dask/dask/issues/6972).\n if i == ddf.npartitions - 2 or ddf.divisions[i] != ddf.divisions[i + 1]:\n frames.append((ddf._name, i))\n dsk[(name, i)] = (methods.concat, frames)\n frames = []\n\n graph = HighLevelGraph.from_collections(name, dsk, dependencies=[ddf])\n return new_dd_object(graph, name, ddf._meta, ddf.divisions)\n\n\ndef compute_and_set_divisions(df, **kwargs):\n mins = df.index.map_partitions(M.min, meta=df.index)\n maxes = df.index.map_partitions(M.max, meta=df.index)\n mins, maxes = compute(mins, maxes, **kwargs)\n mins = remove_nans(mins)\n maxes = remove_nans(maxes)\n\n if (\n sorted(mins) != list(mins)\n or sorted(maxes) != list(maxes)\n or any(a > b for a, b in zip(mins, maxes))\n ):\n raise ValueError(\n \"Partitions must be sorted ascending with the index\", mins, maxes\n )\n\n df.divisions = tuple(mins) + (list(maxes)[-1],)\n\n overlap = [i for i in range(1, len(mins)) if mins[i] >= maxes[i - 1]]\n return fix_overlap(df, overlap) if overlap else df\n\n\ndef set_sorted_index(df, index, drop=True, divisions=None, **kwargs):\n if not isinstance(index, Series):\n meta = df._meta.set_index(index, drop=drop)\n else:\n meta = df._meta.set_index(index._meta, drop=drop)\n\n result = map_partitions(M.set_index, df, index, drop=drop, meta=meta)\n\n if not divisions:\n return compute_and_set_divisions(result, **kwargs)\n elif len(divisions) != len(df.divisions):\n msg = (\n \"When doing `df.set_index(col, sorted=True, divisions=...)`, \"\n \"divisions indicates known splits in the index column. In this \"\n \"case divisions must be the same length as the existing \"\n \"divisions in `df`\\n\\n\"\n \"If the intent is to repartition into new divisions after \"\n \"setting the index, you probably want:\\n\\n\"\n \"`df.set_index(col, sorted=True).repartition(divisions=divisions)`\"\n )\n raise ValueError(msg)\n\n result.divisions = tuple(divisions)\n return result\n"
] | [
[
"pandas.api.types.is_categorical_dtype",
"pandas.isnull",
"numpy.linspace",
"numpy.floor_divide",
"pandas.Categorical",
"pandas.api.types.is_integer_dtype",
"pandas.api.types.is_list_like",
"pandas.isna",
"numpy.isscalar",
"numpy.mod",
"numpy.min_scalar_type"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.24"
],
"scipy": [],
"tensorflow": []
}
] |
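Note on the row above: `cleanup_partd_files` in this version of `dask/dataframe/shuffle.py` contains a bare `maybe_file` expression in its `else:` branch, so `maybe_file` is unbound (a `NameError`) whenever `p` is not a `partd.Encode`. A corrected version, assuming the intended statement was `maybe_file = p`:

```python
import shutil
import partd

def cleanup_partd_files(p, keys):
    """Remove the on-disk directory behind a partd.File dataset.

    `keys` exists only for scheduling purposes, as in the original.
    """
    if isinstance(p, partd.Encode):
        maybe_file = p.partd
    else:
        maybe_file = p  # the original has a bare `maybe_file` here, leaving it unbound

    path = maybe_file.path if isinstance(maybe_file, partd.File) else None
    if path:
        shutil.rmtree(path, ignore_errors=True)
```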
XidaoW/iFac | [
"43c3103051dfffefe8e0eb09fc7fd53b14754519"
] | [
"src/src/engine/myutil/sampler.py"
] | [
"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n'''\nCreated on 2015/04/13\n\n@author: drumichiro\n'''\nimport numpy as np\nimport matplotlib.mlab as mlab\n\n\ndef generateSample(baseLength, mu, sigma, distFunc):\n x = np.empty([])\n np.random.seed(0)\n for i1 in range(len(mu)):\n data = distFunc(mu[i1], sigma[i1], baseLength)\n x = data if x.shape == () else np.append(x, data, axis=0)\n return x\n\n\ndef generateScalarSample(baseLength, mu, sigma):\n return generateSample(baseLength, mu, sigma, np.random.normal)\n\n\ndef generateVectorSample(baseLength, mu, sigma):\n return generateSample(baseLength, mu, sigma,\n np.random.multivariate_normal)\n\n\ndef gaussian1d(x, mu, sigma):\n return mlab.normpdf(x, mu, sigma)\n\n\ndef gaussian2d(x, mu, sigma):\n return mlab.bivariate_normal(x[..., 0], x[..., 1],\n np.sqrt(sigma[0, 0]), np.sqrt(sigma[1, 1]),\n mu[0], mu[1], sigma[0, 1])\n"
] | [
[
"numpy.sqrt",
"numpy.random.seed",
"numpy.empty",
"numpy.append",
"matplotlib.mlab.normpdf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
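Note on the row above: `sampler.py` depends on `matplotlib.mlab.normpdf` and `matplotlib.mlab.bivariate_normal`, both deprecated in matplotlib 2.2 and removed in 3.1, so the file only runs on older matplotlib releases. Equivalent densities are available from `scipy.stats`; a sketch (scipy is an added dependency here, not one the file imports), noting that the original `gaussian2d` already receives `sigma` as a 2x2 covariance matrix:

```python
import numpy as np
from scipy.stats import norm, multivariate_normal

def gaussian1d(x, mu, sigma):
    # drop-in for the removed matplotlib.mlab.normpdf
    return norm.pdf(x, loc=mu, scale=sigma)

def gaussian2d(x, mu, sigma):
    # replacement for the removed matplotlib.mlab.bivariate_normal;
    # mlab took per-axis standard deviations plus sigma_xy, whereas
    # scipy takes the full 2x2 covariance matrix directly.
    return multivariate_normal(mean=mu, cov=sigma).pdf(x)
```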
bainro/garage | [
"c5afbb19524792d9bbad9b9741f45e1d48ddca3d",
"c5afbb19524792d9bbad9b9741f45e1d48ddca3d"
] | [
"src/garage/tf/policies/categorical_cnn_policy.py",
"tests/garage/replay_buffer/test_replay_buffer.py"
] | [
"\"\"\"CategoricalCNNPolicy with model.\"\"\"\nimport akro\nimport tensorflow as tf\n\nfrom garage.tf.distributions import Categorical\nfrom garage.tf.models import CNNModel\nfrom garage.tf.models import MLPModel\nfrom garage.tf.models import Sequential\nfrom garage.tf.policies import StochasticPolicy\n\n\nclass CategoricalCNNPolicy(StochasticPolicy):\n \"\"\"CategoricalCNNPolicy.\n\n A policy that contains a CNN and a MLP to make prediction based on\n a categorical distribution.\n\n It only works with akro.Discrete action space.\n\n Args:\n env_spec (garage.envs.env_spec.EnvSpec): Environment specification.\n conv_filter_sizes(tuple[int]): Dimension of the filters. For example,\n (3, 5) means there are two convolutional layers. The filter for\n first layer is of dimension (3 x 3) and the second one is of\n dimension (5 x 5).\n conv_filters(tuple[int]): Number of filters. For example, (3, 32) means\n there are two convolutional layers. The filter for the first layer\n has 3 channels and the second one with 32 channels.\n conv_strides(tuple[int]): The stride of the sliding window. For\n example, (1, 2) means there are two convolutional layers. The\n stride of the filter for first layer is 1 and that of the second\n layer is 2.\n conv_pad (str): The type of padding algorithm to use,\n either 'SAME' or 'VALID'.\n name (str): Policy name, also the variable scope of the policy.\n hidden_sizes (list[int]): Output dimension of dense layer(s).\n For example, (32, 32) means the MLP of this policy consists\n of two hidden layers, each with 32 hidden units.\n hidden_nonlinearity (callable): Activation function for intermediate\n dense layer(s). It should return a tf.Tensor. Set it to\n None to maintain a linear activation.\n hidden_w_init (callable): Initializer function for the weight\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n hidden_b_init (callable): Initializer function for the bias\n of intermediate dense layer(s). The function should return a\n tf.Tensor.\n output_nonlinearity (callable): Activation function for output dense\n layer. It should return a tf.Tensor. Set it to None to\n maintain a linear activation.\n output_w_init (callable): Initializer function for the weight\n of output dense layer(s). The function should return a\n tf.Tensor.\n output_b_init (callable): Initializer function for the bias\n of output dense layer(s). 
The function should return a\n tf.Tensor.\n layer_normalization (bool): Bool for using layer normalization or not.\n \"\"\"\n\n def __init__(self,\n env_spec,\n conv_filters,\n conv_filter_sizes,\n conv_strides,\n conv_pad,\n name='CategoricalCNNPolicy',\n hidden_sizes=(32, 32),\n hidden_nonlinearity=tf.nn.relu,\n hidden_w_init=tf.initializers.glorot_uniform(),\n hidden_b_init=tf.zeros_initializer(),\n output_nonlinearity=tf.nn.softmax,\n output_w_init=tf.initializers.glorot_uniform(),\n output_b_init=tf.zeros_initializer(),\n layer_normalization=False):\n if not isinstance(env_spec.action_space, akro.Discrete):\n raise ValueError(\n 'CategoricalCNNPolicy only works with akro.Discrete action '\n 'space.')\n\n if not isinstance(env_spec.observation_space, akro.Box) or \\\n not len(env_spec.observation_space.shape) in (2, 3):\n raise ValueError(\n '{} can only process 2D, 3D akro.Image or'\n ' akro.Box observations, but received an env_spec with '\n 'observation_space of type {} and shape {}'.format(\n type(self).__name__,\n type(env_spec.observation_space).__name__,\n env_spec.observation_space.shape))\n\n super().__init__(name, env_spec)\n self.obs_dim = env_spec.observation_space.shape\n self.action_dim = env_spec.action_space.n\n\n self.model = Sequential(\n CNNModel(filter_dims=conv_filter_sizes,\n num_filters=conv_filters,\n strides=conv_strides,\n padding=conv_pad,\n hidden_nonlinearity=hidden_nonlinearity,\n name='CNNModel'),\n MLPModel(output_dim=self.action_dim,\n hidden_sizes=hidden_sizes,\n hidden_nonlinearity=hidden_nonlinearity,\n hidden_w_init=hidden_w_init,\n hidden_b_init=hidden_b_init,\n output_nonlinearity=output_nonlinearity,\n output_w_init=output_w_init,\n output_b_init=output_b_init,\n layer_normalization=layer_normalization,\n name='MLPModel'))\n\n self._initialize()\n\n def _initialize(self):\n if isinstance(self.env_spec.observation_space, akro.Image):\n state_input = tf.compat.v1.placeholder(tf.uint8,\n shape=(None, ) +\n self.obs_dim)\n state_input = tf.cast(state_input, tf.float32)\n state_input /= 255.0\n else:\n state_input = tf.compat.v1.placeholder(tf.float32,\n shape=(None, ) +\n self.obs_dim)\n\n with tf.compat.v1.variable_scope(self.name) as vs:\n self._variable_scope = vs\n self.model.build(state_input)\n\n self._f_prob = tf.compat.v1.get_default_session().make_callable(\n self.model.outputs, feed_list=[self.model.input])\n\n @property\n def vectorized(self):\n \"\"\"Vectorized or not.\n\n Returns:\n bool: True if primitive supports vectorized operations.\n \"\"\"\n return True\n\n def dist_info_sym(self, obs_var, state_info_vars=None, name=None):\n \"\"\"Build a symbolic graph of the distribution parameters.\n\n Args:\n obs_var (tf.Tensor): Tensor input for symbolic graph.\n state_info_vars (dict[np.ndarray]): Extra state information, e.g.\n previous action.\n name (str): Name for symbolic graph.\n\n Returns:\n dict[tf.Tensor]: Outputs of the symbolic graph of distribution\n parameters.\n\n \"\"\"\n with tf.compat.v1.variable_scope(self._variable_scope):\n if isinstance(self.env_spec.observation_space, akro.Image):\n obs_var = tf.cast(obs_var, tf.float32) / 255.0\n\n prob = self.model.build(obs_var, name=name)\n return dict(prob=prob)\n\n def dist_info(self, obs, state_infos=None):\n \"\"\"Get distribution parameters.\n\n Args:\n obs (np.ndarray): Observation input.\n state_infos (dict[np.ndarray]): Extra state information, e.g.\n previous action.\n\n Returns:\n dict[np.ndarray]: Distribution parameters.\n\n \"\"\"\n prob = self._f_prob(obs)\n return 
dict(prob=prob)\n\n def get_action(self, observation):\n \"\"\"Get single action from this policy for the input observation.\n\n Args:\n observation (numpy.ndarray): Observation from environment.\n\n Returns:\n numpy.ndarray: Predicted action.\n dict[str: np.ndarray]: Action distribution.\n\n \"\"\"\n if len(observation.shape) < len(self.obs_dim):\n observation = self.env_spec.observation_space.unflatten(\n observation)\n prob = self._f_prob([observation])[0]\n action = self.action_space.weighted_sample(prob)\n return action, dict(prob=prob)\n\n def get_actions(self, observations):\n \"\"\"Get multiple actions from this policy for the input observations.\n\n Args:\n observations (numpy.ndarray): Observations from environment.\n\n Returns:\n numpy.ndarray: Predicted actions.\n dict[str: np.ndarray]: Action distributions.\n\n \"\"\"\n if len(observations[0].shape) < len(self.obs_dim):\n observations = self.env_spec.observation_space.unflatten_n(\n observations)\n probs = self._f_prob(observations)\n actions = list(map(self.action_space.weighted_sample, probs))\n return actions, dict(prob=probs)\n\n @property\n def distribution(self):\n \"\"\"Policy distribution.\n\n Returns:\n garage.tf.distributions.Categorical: Policy distribution.\n\n \"\"\"\n return Categorical(self.action_dim)\n\n def __getstate__(self):\n \"\"\"Object.__getstate__.\n\n Returns:\n dict: The state to be pickled for the instance.\n\n \"\"\"\n new_dict = super().__getstate__()\n del new_dict['_f_prob']\n return new_dict\n\n def __setstate__(self, state):\n \"\"\"Object.__setstate__.\n\n Args:\n state (dict): Unpickled state.\n\n \"\"\"\n super().__setstate__(state)\n self._initialize()\n",
"import pickle\n\nimport numpy as np\n\nfrom garage.replay_buffer import SimpleReplayBuffer\nfrom tests.fixtures.envs.dummy import DummyDiscreteEnv\n\n\nclass TestReplayBuffer:\n\n def test_add_transition_dtype(self):\n env = DummyDiscreteEnv()\n obs = env.reset()\n replay_buffer = SimpleReplayBuffer(env_spec=env,\n size_in_transitions=3,\n time_horizon=1)\n replay_buffer.add_transition(observation=obs,\n action=env.action_space.sample())\n sample = replay_buffer.sample(1)\n sample_obs = sample['observation']\n sample_action = sample['action']\n\n assert sample_obs.dtype == env.observation_space.dtype\n assert sample_action.dtype == env.action_space.dtype\n\n def test_add_transitions_dtype(self):\n env = DummyDiscreteEnv()\n obs = env.reset()\n replay_buffer = SimpleReplayBuffer(env_spec=env,\n size_in_transitions=3,\n time_horizon=1)\n replay_buffer.add_transitions(observation=[obs],\n action=[env.action_space.sample()])\n sample = replay_buffer.sample(1)\n sample_obs = sample['observation']\n sample_action = sample['action']\n\n assert sample_obs.dtype == env.observation_space.dtype\n assert sample_action.dtype == env.action_space.dtype\n\n def test_eviction_policy(self):\n env = DummyDiscreteEnv()\n obs = env.reset()\n\n replay_buffer = SimpleReplayBuffer(env_spec=env,\n size_in_transitions=3,\n time_horizon=1)\n replay_buffer.add_transitions(observation=[obs, obs], action=[1, 2])\n assert not replay_buffer.full\n replay_buffer.add_transitions(observation=[obs, obs], action=[3, 4])\n assert replay_buffer.full\n replay_buffer.add_transitions(observation=[obs, obs], action=[5, 6])\n replay_buffer.add_transitions(observation=[obs, obs], action=[7, 8])\n\n assert np.array_equal(replay_buffer._buffer['action'], [[7], [8], [6]])\n assert replay_buffer.n_transitions_stored == 3\n\n def test_pickleable(self):\n env = DummyDiscreteEnv()\n obs = env.reset()\n\n replay_buffer = SimpleReplayBuffer(env_spec=env,\n size_in_transitions=100,\n time_horizon=1)\n for _ in range(0, 100):\n replay_buffer.add_transitions(observation=[obs], action=[1])\n replay_buffer_pickled = pickle.loads(pickle.dumps(replay_buffer))\n assert replay_buffer_pickled._buffer.keys(\n ) == replay_buffer._buffer.keys()\n for k in replay_buffer_pickled._buffer:\n assert replay_buffer_pickled._buffer[\n k].shape == replay_buffer._buffer[k].shape\n"
] | [
[
"tensorflow.initializers.glorot_uniform",
"tensorflow.zeros_initializer",
"tensorflow.cast",
"tensorflow.compat.v1.get_default_session",
"tensorflow.compat.v1.placeholder",
"tensorflow.compat.v1.variable_scope"
],
[
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
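The garage record above ends in `get_action`, which turns the CNN+MLP softmax output into a discrete action via `action_space.weighted_sample(prob)`. A minimal, self-contained sketch of that sampling step, assuming only numpy (the `prob` vector stands in for the output of `_f_prob`; `weighted_sample` here is an illustrative re-implementation, not the akro API):

import numpy as np

def weighted_sample(prob, rng=np.random.default_rng(0)):
    # Draw one discrete action index with probability proportional to `prob`,
    # mirroring what action_space.weighted_sample does in the policy above.
    prob = np.asarray(prob, dtype=float)
    prob = prob / prob.sum()  # guard against minor normalization drift in the softmax
    return rng.choice(len(prob), p=prob)

prob = np.array([0.1, 0.2, 0.7])  # e.g. softmax output for a 3-action space
action = weighted_sample(prob)
assert 0 <= action < 3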
choishingwan/PRScsx | [
"f7941665c2d86754dbc94a25f2d6d19aea23be32",
"f7941665c2d86754dbc94a25f2d6d19aea23be32"
] | [
"gigrnd.py",
"parse_genet.py"
] | [
"#!/usr/bin/env python\n\n\"\"\"\nRandom variate generator for the generalized inverse Gaussian distribution.\nReference: L Devroye. Random variate generation for the generalized inverse Gaussian distribution.\n Statistics and Computing, 24(2):239-246, 2014.\n\n\"\"\"\n\n\nimport math\nfrom scipy import random\n\n\ndef psi(x, alpha, lam):\n f = -alpha*(math.cosh(x)-1)-lam*(math.exp(x)-x-1)\n return f\n\n\ndef dpsi(x, alpha, lam):\n f = -alpha*math.sinh(x)-lam*(math.exp(x)-1)\n return f\n\n\ndef g(x, sd, td, f1, f2):\n if (x >= -sd) and (x <= td):\n f = 1\n elif x > td:\n f = f1\n elif x < -sd:\n f = f2\n\n return f\n\n\ndef gigrnd(p, a, b):\n # setup -- sample from the two-parameter version gig(lam,omega)\n p = float(p); a = float(a); b = float(b)\n lam = p\n omega = math.sqrt(a*b)\n\n if lam < 0:\n lam = -lam\n swap = True\n else:\n swap = False\n\n alpha = math.sqrt(math.pow(omega,2)+math.pow(lam,2))-lam\n\n # find t\n x = -psi(1, alpha, lam)\n if (x >= 1/2) and (x <= 2):\n t = 1\n elif x > 2:\n t = math.sqrt(2/(alpha+lam))\n elif x < 1/2:\n t = math.log(4/(alpha+2*lam))\n\n # find s\n x = -psi(-1, alpha, lam)\n if (x >= 1/2) and (x <= 2):\n s = 1\n elif x > 2:\n s = math.sqrt(4/(alpha*math.cosh(1)+lam))\n elif x < 1/2:\n if alpha == 0:\n s = 1/lam\n else:\n if lam==0:\n s = math.log(1+1/alpha+math.sqrt(1/math.pow(alpha,2)+2/alpha))\n else:\n s = min(1/lam, math.log(1+1/alpha+math.sqrt(1/math.pow(alpha,2)+2/alpha)))\n\n # find auxiliary parameters\n eta = -psi(t, alpha, lam)\n zeta = -dpsi(t, alpha, lam)\n theta = -psi(-s, alpha, lam)\n xi = dpsi(-s, alpha, lam)\n\n p = 1/xi\n r = 1/zeta\n\n td = t-r*eta\n sd = s-p*theta\n q = td+sd\n\n # random variate generation\n while True:\n U = random.random()\n V = random.random()\n W = random.random()\n if U < q/(p+q+r):\n rnd = -sd+q*V\n elif U < (q+r)/(p+q+r):\n rnd = td-r*math.log(V)\n else:\n rnd = -sd+p*math.log(V)\n\n f1 = math.exp(-eta-zeta*(rnd-t))\n f2 = math.exp(-theta+xi*(rnd+s))\n if W*g(rnd, sd, td, f1, f2) <= math.exp(psi(rnd, alpha, lam)):\n break\n\n # transform back to the three-parameter version gig(p,a,b)\n rnd = math.exp(rnd)*(lam/omega+math.sqrt(1+math.pow(lam,2)/math.pow(omega,2)))\n if swap:\n rnd = 1/rnd\n\n rnd = rnd/math.sqrt(a/b)\n return rnd\n\n\n",
"#!/usr/bin/env python\n\n\"\"\"\nParse the reference panel, summary statistics, and validation set.\n\n\"\"\"\n\n\nimport scipy as sp\nfrom scipy.stats import norm\nimport h5py\n\n\ndef parse_ref(ref_file, chrom):\n print('... parse reference file: %s ...' % ref_file)\n\n ref_dict = {'CHR':[], 'SNP':[], 'BP':[], 'A1':[], 'A2':[], 'FRQ_EUR':[], 'FRQ_EAS':[], 'FRQ_AFR':[], 'FLP_EUR':[], 'FLP_EAS':[], 'FLP_AFR':[]}\n with open(ref_file) as ff:\n header = next(ff)\n for line in ff:\n ll = (line.strip()).split()\n if int(ll[0]) == chrom:\n ref_dict['CHR'].append(chrom)\n ref_dict['SNP'].append(ll[1])\n ref_dict['BP'].append(int(ll[2]))\n ref_dict['A1'].append(ll[3])\n ref_dict['A2'].append(ll[4])\n ref_dict['FRQ_EUR'].append(float(ll[5]))\n ref_dict['FRQ_EAS'].append(float(ll[6]))\n ref_dict['FRQ_AFR'].append(float(ll[7]))\n ref_dict['FLP_EUR'].append(int(ll[8]))\n ref_dict['FLP_EAS'].append(int(ll[9]))\n ref_dict['FLP_AFR'].append(int(ll[10]))\n\n print('... %d SNPs on chromosome %d read from %s ...' % (len(ref_dict['SNP']), chrom, ref_file))\n return ref_dict\n\n\ndef parse_bim(bim_file, chrom):\n print('... parse bim file: %s ...' % (bim_file + '.bim'))\n\n vld_dict = {'SNP':[], 'A1':[], 'A2':[]}\n with open(bim_file + '.bim') as ff:\n for line in ff:\n ll = (line.strip()).split()\n if int(ll[0]) == chrom:\n vld_dict['SNP'].append(ll[1])\n vld_dict['A1'].append(ll[4])\n vld_dict['A2'].append(ll[5])\n\n print('... %d SNPs on chromosome %d read from %s ...' % (len(vld_dict['SNP']), chrom, bim_file + '.bim'))\n return vld_dict\n\n\ndef parse_sumstats(ref_dict, vld_dict, sst_file, pop, n_subj):\n print('... parse ' + pop.upper() + ' sumstats file: %s ...' % sst_file)\n\n ATGC = ['A', 'T', 'G', 'C']\n sst_dict = {'SNP':[], 'A1':[], 'A2':[]}\n with open(sst_file) as ff:\n header = next(ff)\n for line in ff:\n ll = (line.strip()).split()\n if ll[1] in ATGC and ll[2] in ATGC:\n sst_dict['SNP'].append(ll[0])\n sst_dict['A1'].append(ll[1])\n sst_dict['A2'].append(ll[2])\n\n print('... %d SNPs read from %s ...' % (len(sst_dict['SNP']), sst_file))\n\n\n idx = [ii for (ii,frq) in enumerate(ref_dict['FRQ_'+pop.upper()]) if frq>0]\n snp_ref = [ref_dict['SNP'][ii] for ii in idx]\n a1_ref = [ref_dict['A1'][ii] for ii in idx]\n a2_ref = [ref_dict['A2'][ii] for ii in idx]\n\n\n mapping = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}\n\n vld_snp = set(zip(vld_dict['SNP'], vld_dict['A1'], vld_dict['A2']))\n\n ref_snp = set(zip(snp_ref, a1_ref, a2_ref)) | set(zip(snp_ref, a2_ref, a1_ref)) | \\\n set(zip(snp_ref, [mapping[aa] for aa in a1_ref], [mapping[aa] for aa in a2_ref])) | \\\n set(zip(snp_ref, [mapping[aa] for aa in a2_ref], [mapping[aa] for aa in a1_ref]))\n\n sst_snp = set(zip(sst_dict['SNP'], sst_dict['A1'], sst_dict['A2'])) | set(zip(sst_dict['SNP'], sst_dict['A2'], sst_dict['A1'])) | \\\n set(zip(sst_dict['SNP'], [mapping[aa] for aa in sst_dict['A1']], [mapping[aa] for aa in sst_dict['A2']])) | \\\n set(zip(sst_dict['SNP'], [mapping[aa] for aa in sst_dict['A2']], [mapping[aa] for aa in sst_dict['A1']]))\n\n comm_snp = vld_snp & ref_snp & sst_snp\n\n print('... %d common SNPs in the %s reference, %s sumstats, and validation set ...' 
% (len(comm_snp), pop.upper(), pop.upper()))\n\n\n n_sqrt = sp.sqrt(n_subj)\n sst_eff = {}\n with open(sst_file) as ff:\n header = (next(ff).strip()).split()\n header = [col.upper() for col in header]\n for line in ff:\n ll = (line.strip()).split()\n snp = ll[0]; a1 = ll[1]; a2 = ll[2]\n if a1 not in ATGC or a2 not in ATGC:\n continue\n if (snp, a1, a2) in comm_snp or (snp, mapping[a1], mapping[a2]) in comm_snp:\n if 'BETA' in header:\n beta = float(ll[3])\n elif 'OR' in header:\n beta = sp.log(float(ll[3]))\n\n p = max(float(ll[4]), 1e-323)\n beta_std = sp.sign(beta)*abs(norm.ppf(p/2.0))/n_sqrt\n sst_eff.update({snp: beta_std})\n elif (snp, a2, a1) in comm_snp or (snp, mapping[a2], mapping[a1]) in comm_snp:\n if 'BETA' in header:\n beta = float(ll[3])\n elif 'OR' in header:\n beta = sp.log(float(ll[3]))\n\n p = max(float(ll[4]), 1e-323)\n beta_std = -1*sp.sign(beta)*abs(norm.ppf(p/2.0))/n_sqrt\n sst_eff.update({snp: beta_std})\n\n\n sst_dict = {'SNP':[], 'FRQ':[], 'BETA':[], 'FLP':[]}\n for (ii,snp) in enumerate(ref_dict['SNP']):\n if snp in sst_eff:\n sst_dict['SNP'].append(snp)\n sst_dict['BETA'].append(sst_eff[snp])\n\n a1 = ref_dict['A1'][ii]; a2 = ref_dict['A2'][ii]\n if (snp, a1, a2) in comm_snp or (snp, mapping[a1], mapping[a2]) in comm_snp:\n sst_dict['FRQ'].append(ref_dict['FRQ_'+pop.upper()][ii])\n sst_dict['FLP'].append(ref_dict['FLP_'+pop.upper()][ii])\n elif (snp, a2, a1) in comm_snp or (snp, mapping[a2], mapping[a1]) in comm_snp:\n sst_dict['FRQ'].append(1-ref_dict['FRQ_'+pop.upper()][ii])\n sst_dict['FLP'].append(-1*ref_dict['FLP_'+pop.upper()][ii])\n\n return sst_dict\n\n\ndef parse_ldblk(ldblk_dir, sst_dict, pop, chrom):\n print('... parse %s reference LD on chromosome %d ...' % (pop.upper(), chrom))\n\n chr_name = ldblk_dir + '/ldblk_1kg_' + pop.lower() + '/ldblk_1kg_chr' + str(chrom) + '.hdf5'\n hdf_chr = h5py.File(chr_name, 'r')\n n_blk = len(hdf_chr)\n ld_blk = [sp.array(hdf_chr['blk_'+str(blk)]['ldblk']) for blk in range(1,n_blk+1)]\n\n snp_blk = []\n for blk in range(1,n_blk+1):\n snp_blk.append([bb.decode(\"UTF-8\") for bb in list(hdf_chr['blk_'+str(blk)]['snplist'])])\n\n blk_size = []\n mm = 0\n for blk in range(n_blk):\n idx = [ii for (ii,snp) in enumerate(snp_blk[blk]) if snp in sst_dict['SNP']]\n blk_size.append(len(idx))\n if idx != []:\n idx_blk = range(mm,mm+len(idx))\n flip = [sst_dict['FLP'][jj] for jj in idx_blk]\n ld_blk[blk] = ld_blk[blk][sp.ix_(idx,idx)]*sp.outer(flip,flip)\n mm += len(idx)\n else:\n ld_blk[blk] = sp.array([])\n\n return ld_blk, blk_size\n\n\ndef align_ldblk(ref_dict, vld_dict, sst_dict, n_pop, chrom):\n print('... align reference LD on chromosome %d across populations ...' % chrom)\n\n snp_dict = {'CHR':[], 'SNP':[], 'BP':[], 'A1':[], 'A2':[]}\n for (ii,snp) in enumerate(ref_dict['SNP']):\n for pp in range(n_pop):\n if snp in sst_dict[pp]['SNP']:\n snp_dict['SNP'].append(snp)\n snp_dict['CHR'].append(ref_dict['CHR'][ii])\n snp_dict['BP'].append(ref_dict['BP'][ii])\n\n idx = vld_dict['SNP'].index(snp)\n snp_dict['A1'].append(vld_dict['A1'][idx])\n snp_dict['A2'].append(vld_dict['A2'][idx])\n break\n\n n_snp = len(snp_dict['SNP'])\n print('... %d valid SNPs across populations ...' % n_snp)\n\n beta_dict = {}\n frq_dict = {}\n idx_dict = {}\n for pp in range(n_pop):\n beta_dict[pp] = sp.array(sst_dict[pp]['BETA'], ndmin=2).T\n frq_dict[pp] = sp.array(sst_dict[pp]['FRQ'], ndmin=2).T\n idx_dict[pp] = [ii for (ii,snp) in enumerate(snp_dict['SNP']) if snp in sst_dict[pp]['SNP']]\n\n return snp_dict, beta_dict, frq_dict, idx_dict\n\n\n"
] | [
[
"scipy.random.random"
],
[
"scipy.stats.norm.ppf",
"scipy.outer",
"scipy.sqrt",
"scipy.ix_",
"scipy.array",
"scipy.sign"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
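The `gigrnd` function in the PRScsx record implements Devroye's rejection sampler for the three-parameter generalized inverse Gaussian GIG(p, a, b); note that `from scipy import random` in that file resolved to an alias of `numpy.random` in older SciPy releases and is absent from newer ones. A short sanity check of the sampler against the closed-form GIG mean, sqrt(b/a) * K_{p+1}(sqrt(ab)) / K_p(sqrt(ab)); this sketch assumes `gigrnd.py` above is importable as a module:

import math
from scipy.special import kv   # modified Bessel function of the second kind

from gigrnd import gigrnd      # the sampler defined in gigrnd.py above

p, a, b = 1.0, 2.0, 3.0
omega = math.sqrt(a * b)
analytic_mean = math.sqrt(b / a) * kv(p + 1, omega) / kv(p, omega)

draws = [gigrnd(p, a, b) for _ in range(20000)]
empirical_mean = sum(draws) / len(draws)
print(empirical_mean, analytic_mean)   # the two should agree to roughly 1e-2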
landreman/pyQSC | [
"75f34d62c24eb94f481632ee0e1bf260d7581f2a"
] | [
"qsc/qsc.py"
] | [
"\"\"\"\nThis module contains the top-level routines for the quasisymmetric\nstellarator construction.\n\"\"\"\n\nimport logging\nimport numpy as np\nfrom scipy.io import netcdf\n#from numba import jit\n\n#logging.basicConfig(level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\nclass Qsc():\n \"\"\"\n This is the main class for representing the quasisymmetric\n stellarator construction.\n \"\"\"\n \n # Import methods that are defined in separate files:\n from .init_axis import init_axis, convert_to_spline\n from .calculate_r1 import _residual, _jacobian, solve_sigma_equation, \\\n _determine_helicity, r1_diagnostics\n from .grad_B_tensor import calculate_grad_B_tensor, calculate_grad_grad_B_tensor, \\\n Bfield_cylindrical, Bfield_cartesian, grad_B_tensor_cartesian, \\\n grad_grad_B_tensor_cylindrical, grad_grad_B_tensor_cartesian\n from .calculate_r2 import calculate_r2\n from .calculate_r3 import calculate_r3\n from .mercier import mercier\n from .r_singularity import calculate_r_singularity\n from .plot import plot, plot_boundary, get_boundary, B_fieldline, B_contour, plot_axis\n from .Frenet_to_cylindrical import Frenet_to_cylindrical\n from .to_vmec import to_vmec\n from .util import B_mag\n \n def __init__(self, rc, zs, rs=[], zc=[], nfp=1, etabar=1., sigma0=0., B0=1.,\n I2=0., sG=1, spsi=1, nphi=61, B2s=0., B2c=0., p2=0., order=\"r1\"):\n \"\"\"\n Create a quasisymmetric stellarator.\n \"\"\"\n # First, force {rc, zs, rs, zc} to have the same length, for\n # simplicity.\n nfourier = np.max([len(rc), len(zs), len(rs), len(zc)])\n self.nfourier = nfourier\n self.rc = np.zeros(nfourier)\n self.zs = np.zeros(nfourier)\n self.rs = np.zeros(nfourier)\n self.zc = np.zeros(nfourier)\n self.rc[:len(rc)] = rc\n self.zs[:len(zs)] = zs\n self.rs[:len(rs)] = rs\n self.zc[:len(zc)] = zc\n\n # Force nphi to be odd:\n if np.mod(nphi, 2) == 0:\n nphi += 1\n\n if sG != 1 and sG != -1:\n raise ValueError('sG must be +1 or -1')\n \n if spsi != 1 and spsi != -1:\n raise ValueError('spsi must be +1 or -1')\n\n self.nfp = nfp\n self.etabar = etabar\n self.sigma0 = sigma0\n self.B0 = B0\n self.I2 = I2\n self.sG = sG\n self.spsi = spsi\n self.nphi = nphi\n self.B2s = B2s\n self.B2c = B2c\n self.p2 = p2\n self.order = order\n self.min_R0_threshold = 0.3\n self._set_names()\n\n self.calculate()\n\n def change_nfourier(self, nfourier_new):\n \"\"\"\n Resize the arrays of Fourier amplitudes. 
You can either increase\n or decrease nfourier.\n \"\"\"\n rc_old = self.rc\n rs_old = self.rs\n zc_old = self.zc\n zs_old = self.zs\n index = np.min((self.nfourier, nfourier_new))\n self.rc = np.zeros(nfourier_new)\n self.rs = np.zeros(nfourier_new)\n self.zc = np.zeros(nfourier_new)\n self.zs = np.zeros(nfourier_new)\n self.rc[:index] = rc_old[:index]\n self.rs[:index] = rs_old[:index]\n self.zc[:index] = zc_old[:index]\n self.zs[:index] = zs_old[:index]\n nfourier_old = self.nfourier\n self.nfourier = nfourier_new\n self._set_names()\n # No need to recalculate if we increased the Fourier\n # resolution, only if we decreased it.\n if nfourier_new < nfourier_old:\n self.calculate()\n\n def calculate(self):\n \"\"\"\n Driver for the main calculations.\n \"\"\"\n self.init_axis()\n self.solve_sigma_equation()\n self.r1_diagnostics()\n if self.order != 'r1':\n self.calculate_r2()\n if self.order == 'r3':\n self.calculate_r3()\n \n def get_dofs(self):\n \"\"\"\n Return a 1D numpy vector of all possible optimizable\n degrees-of-freedom, for simsopt.\n \"\"\"\n return np.concatenate((self.rc, self.zs, self.rs, self.zc,\n np.array([self.etabar, self.sigma0, self.B2s, self.B2c, self.p2, self.I2, self.B0])))\n\n def set_dofs(self, x):\n \"\"\"\n For interaction with simsopt, set the optimizable degrees of\n freedom from a 1D numpy vector.\n \"\"\"\n assert len(x) == self.nfourier * 4 + 7\n self.rc = x[self.nfourier * 0 : self.nfourier * 1]\n self.zs = x[self.nfourier * 1 : self.nfourier * 2]\n self.rs = x[self.nfourier * 2 : self.nfourier * 3]\n self.zc = x[self.nfourier * 3 : self.nfourier * 4]\n self.etabar = x[self.nfourier * 4 + 0]\n self.sigma0 = x[self.nfourier * 4 + 1]\n self.B2s = x[self.nfourier * 4 + 2]\n self.B2c = x[self.nfourier * 4 + 3]\n self.p2 = x[self.nfourier * 4 + 4]\n self.I2 = x[self.nfourier * 4 + 5]\n self.B0 = x[self.nfourier * 4 + 6]\n self.calculate()\n logger.info('set_dofs called with x={}. Now iota={}, elongation={}'.format(x, self.iota, self.max_elongation))\n \n def _set_names(self):\n \"\"\"\n For simsopt, sets the list of names for each degree of freedom.\n \"\"\"\n names = []\n names += ['rc({})'.format(j) for j in range(self.nfourier)]\n names += ['zs({})'.format(j) for j in range(self.nfourier)]\n names += ['rs({})'.format(j) for j in range(self.nfourier)]\n names += ['zc({})'.format(j) for j in range(self.nfourier)]\n names += ['etabar', 'sigma0', 'B2s', 'B2c', 'p2', 'I2', 'B0']\n self.names = names\n\n @classmethod\n def from_paper(cls, name, **kwargs):\n \"\"\"\n Get one of the configurations that has been used in our papers.\n Available values for ``name`` are\n ``\"r1 section 5.1\"``,\n ``\"r1 section 5.2\"``,\n ``\"r1 section 5.3\"``,\n ``\"r2 section 5.1\"``,\n ``\"r2 section 5.2\"``,\n ``\"r2 section 5.3\"``,\n ``\"r2 section 5.4\"``, and\n ``\"r2 section 5.5\"``.\n These last 5 configurations can also be obtained by specifying an integer 1-5 for ``name``.\n The configurations that begin with ``\"r1\"`` refer to sections in \n Landreman, Sengupta, and Plunk, Journal of Plasma Physics 85, 905850103 (2019).\n The configurations that begin with ``\"r2\"`` refer to sections in \n Landreman and Sengupta, Journal of Plasma Physics 85, 815850601 (2019).\n\n You can specify any other arguments of the ``Qsc`` constructor\n in ``kwargs``. You can also use ``kwargs`` to override any of\n the properties of the configurations from the papers. For\n instance, you can modify the value of ``etabar`` in the first\n example using\n\n .. 
code-block::\n\n q = qsc.Qsc.from_paper('r1 section 5.1', etabar=1.1)\n \"\"\"\n\n def add_default_args(kwargs_old, **kwargs_new):\n \"\"\"\n Take any key-value arguments in ``kwargs_new`` and treat them as\n defaults, adding them to the dict ``kwargs_old`` only if\n they are not specified there.\n \"\"\"\n for key in kwargs_new:\n if key not in kwargs_old:\n kwargs_old[key] = kwargs_new[key]\n\n \n if name == \"r1 section 5.1\":\n \"\"\" The configuration from Landreman, Sengupta, Plunk (2019), section 5.1 \"\"\"\n add_default_args(kwargs, rc=[1, 0.045], zs=[0, -0.045], nfp=3, etabar=-0.9)\n \n elif name == \"r1 section 5.2\":\n \"\"\" The configuration from Landreman, Sengupta, Plunk (2019), section 5.2 \"\"\"\n add_default_args(kwargs, rc=[1, 0.265], zs=[0, -0.21], nfp=4, etabar=-2.25)\n \n elif name == \"r1 section 5.3\":\n \"\"\" The configuration from Landreman, Sengupta, Plunk (2019), section 5.3 \"\"\"\n add_default_args(kwargs, rc=[1, 0.042], zs=[0, -0.042], zc=[0, -0.025], nfp=3, etabar=-1.1, sigma0=-0.6)\n \n elif name == \"r2 section 5.1\" or name == '5.1' or name == 1:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.1 \"\"\"\n add_default_args(kwargs, rc=[1, 0.155, 0.0102], zs=[0, 0.154, 0.0111], nfp=2, etabar=0.64, order='r3', B2c=-0.00322)\n \n elif name == \"r2 section 5.2\" or name == '5.2' or name == 2:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.2 \"\"\"\n add_default_args(kwargs, rc=[1, 0.173, 0.0168, 0.00101], zs=[0, 0.159, 0.0165, 0.000985], nfp=2, etabar=0.632, order='r3', B2c=-0.158)\n \n elif name == \"r2 section 5.3\" or name == '5.3' or name == 3:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.3 \"\"\"\n add_default_args(kwargs, rc=[1, 0.09], zs=[0, -0.09], nfp=2, etabar=0.95, I2=0.9, order='r3', B2c=-0.7, p2=-600000.)\n \n elif name == \"r2 section 5.4\" or name == '5.4' or name == 4:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.4 \"\"\"\n add_default_args(kwargs, rc=[1, 0.17, 0.01804, 0.001409, 5.877e-05],\n zs=[0, 0.1581, 0.01820, 0.001548, 7.772e-05], nfp=4, etabar=1.569, order='r3', B2c=0.1348)\n \n elif name == \"r2 section 5.5\" or name == '5.5' or name == 5:\n \"\"\" The configuration from Landreman & Sengupta (2019), section 5.5 \"\"\"\n add_default_args(kwargs, rc=[1, 0.3], zs=[0, 0.3], nfp=5, etabar=2.5, sigma0=0.3, I2=1.6, order='r3', B2c=1., B2s=3., p2=-0.5e7)\n\n elif name == \"LandremanPaul2021QA\" or name == \"precise QA\":\n \"\"\"\n A fit of the near-axis model to the quasi-axisymmetric\n configuration in Landreman & Paul, arXiv:2108.03711 (2021).\n\n The fit was performed to the boozmn data using the script\n 20200621-01-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=2,\n rc=[1.0038581971135636, 0.18400998741139907, 0.021723381370503204, 0.0025968236014410812, 0.00030601568477064874, 3.5540509760304384e-05, 4.102693907398271e-06, 5.154300428457222e-07, 4.8802742243232844e-08, 7.3011320375259876e-09],\n zs=[0.0, -0.1581148860568176, -0.02060702320552523, -0.002558840496952667, -0.0003061368667524159, -3.600111450532304e-05, -4.174376962124085e-06, -4.557462755956434e-07, -8.173481495049928e-08, -3.732477282851326e-09],\n B0=1.006541121335688,\n etabar=-0.6783912804454629,\n B2c=0.26859318908803137,\n nphi=99,\n order='r3')\n\n elif name == \"precise QA+well\":\n \"\"\"\n A fit of the near-axis model to the precise quasi-axisymmetric\n configuration from SIMSOPT with magnetic well.\n\n The fit was performed to the boozmn 
data using the script\n 20200621-01-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=2,\n rc=[1.0145598919163676, 0.2106377247598754, 0.025469267136340394, 0.0026773601516136727, 0.00021104172568911153, 7.891887175655046e-06, -8.216044358250985e-07, -2.379942694112007e-07, -2.5495108673798585e-08, 1.1679227114962395e-08, 8.961288962248274e-09],\n zs=[0.0, -0.14607192982551795, -0.021340448470388084, -0.002558983303282255, -0.0002355043952788449, -1.2752278964149462e-05, 3.673356209179739e-07, 9.261098628194352e-08, -7.976283362938471e-09, -4.4204430633540756e-08, -1.6019372369445714e-08],\n B0=1.0117071561808106,\n etabar=-0.5064143402495729,\n B2c=-0.2749140163639202,\n nphi=99,\n order='r3')\n \n elif name == \"LandremanPaul2021QH\" or name == \"precise QH\":\n \"\"\"\n A fit of the near-axis model to the quasi-helically symmetric\n configuration in Landreman & Paul, arXiv:2108.03711 (2021).\n\n The fit was performed to the boozmn data using the script\n 20211001-02-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=4,\n rc=[1.0033608429348413, 0.19993025252481125, 0.03142704185268144, 0.004672593645851904, 0.0005589954792333977, 3.298415996551805e-05, -7.337736061708705e-06, -2.8829857667619663e-06, -4.51059545517434e-07],\n zs=[0.0, 0.1788824025525348, 0.028597666614604524, 0.004302393796260442, 0.0005283708386982674, 3.5146899855826326e-05, -5.907671188908183e-06, -2.3945326611145963e-06, -6.87509350019021e-07],\n B0=1.003244143729638,\n etabar=-1.5002839921360023,\n B2c=0.37896407142157423,\n nphi=99,\n order='r3')\n\n elif name == \"precise QH+well\":\n \"\"\"\n A fit of the near-axis model to the precise quasi-helically symmetric\n configuration from SIMSOPT with magnetic well.\n\n The fit was performed to the boozmn data using the script\n 20211001-02-Extract_B0_B1_B2_from_boozxform\n \"\"\"\n add_default_args(kwargs,\n nfp=4,\n rc=[1.000474932581454, 0.16345392520298313, 0.02176330066615466, 0.0023779201451133163, 0.00014141976024376502, -1.0595894482659743e-05, -2.9989267970578764e-06, 3.464574408947338e-08],\n zs=[0.0, 0.12501739099323073, 0.019051257169780858, 0.0023674771227236587, 0.0001865909743321566, -2.2659053455802824e-06, -2.368335337174369e-06, -1.8521248561490157e-08],\n B0=0.999440074325872,\n etabar=-1.2115187546668142,\n B2c=0.6916862277166693,\n nphi=99,\n order='r3')\n \n else:\n raise ValueError('Unrecognized configuration name')\n\n return cls(**kwargs)\n\n @classmethod\n def from_cxx(cls, filename):\n \"\"\"\n Load a configuration from a ``qsc_out.<extension>.nc`` output file\n that was generated by the C++ version of QSC. Almost all the\n data will be taken from the output file, over-writing any\n calculations done in python when the new Qsc object is\n created.\n \"\"\"\n def to_string(nc_str):\n \"\"\" Convert a string from the netcdf binary format to a python string. 
\"\"\"\n temp = [c.decode('UTF-8') for c in nc_str]\n return (''.join(temp)).strip()\n \n f = netcdf.netcdf_file(filename, mmap=False)\n nfp = f.variables['nfp'][()]\n nphi = f.variables['nphi'][()]\n rc = f.variables['R0c'][()]\n rs = f.variables['R0s'][()]\n zc = f.variables['Z0c'][()]\n zs = f.variables['Z0s'][()]\n I2 = f.variables['I2'][()]\n B0 = f.variables['B0'][()]\n spsi = f.variables['spsi'][()]\n sG = f.variables['sG'][()]\n etabar = f.variables['eta_bar'][()]\n sigma0 = f.variables['sigma0'][()]\n order_r_option = to_string(f.variables['order_r_option'][()])\n if order_r_option == 'r2.1':\n order_r_option = 'r3'\n if order_r_option == 'r1':\n p2 = 0.0\n B2c = 0.0\n B2s = 0.0\n else:\n p2 = f.variables['p2'][()]\n B2c = f.variables['B2c'][()]\n B2s = f.variables['B2s'][()]\n\n q = cls(nfp=nfp, nphi=nphi, rc=rc, rs=rs, zc=zc, zs=zs,\n B0=B0, sG=sG, spsi=spsi,\n etabar=etabar, sigma0=sigma0, I2=I2, p2=p2, B2c=B2c, B2s=B2s, order=order_r_option)\n \n def read(name, cxx_name=None):\n if cxx_name is None: cxx_name = name\n setattr(q, name, f.variables[cxx_name][()])\n\n [read(v) for v in ['R0', 'Z0', 'R0p', 'Z0p', 'R0pp', 'Z0pp', 'R0ppp', 'Z0ppp',\n 'sigma', 'curvature', 'torsion', 'X1c', 'Y1c', 'Y1s', 'elongation']]\n if order_r_option != 'r1':\n [read(v) for v in ['X20', 'X2c', 'X2s', 'Y20', 'Y2c', 'Y2s', 'Z20', 'Z2c', 'Z2s', 'B20']]\n if order_r_option != 'r2':\n [read(v) for v in ['X3c1', 'Y3c1', 'Y3s1']]\n \n f.close()\n return q\n \n def min_R0_penalty(self):\n \"\"\"\n This function can be used in optimization to penalize situations\n in which min(R0) < min_R0_constraint.\n \"\"\"\n return np.max((0, self.min_R0_threshold - self.min_R0)) ** 2\n \n"
] | [
[
"numpy.min",
"scipy.io.netcdf.netcdf_file",
"numpy.max",
"numpy.mod",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
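The pyQSC record's `Qsc.from_paper` classmethod builds named configurations and forwards any extra kwargs to the constructor, as its docstring example shows. A minimal usage sketch, assuming the package is importable as `qsc` as in that docstring:

from qsc import Qsc

# One of the documented presets: Landreman & Sengupta (2019), section 5.1,
# with the phi resolution overridden through kwargs as the docstring allows.
# nphi must be odd; the constructor bumps even values by one anyway.
stel = Qsc.from_paper('r2 section 5.1', nphi=151)

# calculate() has already run inside the constructor, so derived quantities
# such as the on-axis rotational transform and elongation are available:
print(stel.iota, stel.max_elongation)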
TharinduRusira/tvm | [
"b076cad542524cb3744149d953c341b5815f6474",
"b076cad542524cb3744149d953c341b5815f6474"
] | [
"topi/tests/python_cpp/test_topi_reorg.py",
"python/tvm/autotvm/measure/measure_methods.py"
] | [
"\"\"\"Test code for reorg\"\"\"\nimport logging\nimport numpy as np\nimport tvm\nimport topi\nimport topi.testing\nfrom topi.util import get_const_tuple\n\ndef verify_reorg(batch, in_size, in_channel, stride):\n '''Verify reorg operator by comparing outputs from tvm and numpy implementation'''\n in_height = in_width = in_size\n\n A = tvm.placeholder((batch, in_channel, in_height, in_width), name='A')\n B = topi.cpp.vision.reorg(A, stride)\n\n a_shape = get_const_tuple(A.shape)\n dtype = A.dtype\n\n def get_ref_data_reorg():\n '''Randomly initialize the data variables and get refernce output for the reorg operation'''\n a_np = np.random.uniform(size=a_shape).astype(dtype)\n b_np = topi.testing.reorg_python(a_np, stride)\n return a_np, b_np\n\n a_np, b_np = get_ref_data_reorg()\n def check_device(device):\n '''Check the device is available and if so, build and run the program'''\n if not tvm.module.enabled(device):\n print(\"Skip because %s is not enabled\" % device)\n return\n print(\"Running on target: %s\" % device)\n target = topi.cpp.TEST_create_target(device)\n if device == \"llvm\":\n s = topi.cpp.generic.default_schedule(target, [B], False)\n else:\n s = topi.cpp.cuda.schedule_injective(target, [B])\n ctx = tvm.context(device, 0)\n a = tvm.nd.array(a_np, ctx)\n b = tvm.nd.array(np.zeros(get_const_tuple(B.shape), dtype=B.dtype), ctx)\n func = tvm.build(s, [A, B], device, name=\"reorg\")\n func(a, b)\n tvm.testing.assert_allclose(b.asnumpy(), b_np, rtol=1e-5)\n\n for device in ['cuda', 'opencl', 'metal', 'rocm', 'llvm', 'vulkan']:\n check_device(device)\n\ndef test_reorg():\n verify_reorg(1, 38, 64, 2)\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.DEBUG)\n test_reorg()\n",
"# pylint: disable=invalid-name,too-many-function-args,too-many-nested-blocks\n\"\"\"\nFunctions that run on executor for measurement.\n\nThese functions are responsible for building the tvm module, uploading it to\nremote devices, recording the running time costs, and checking the correctness of the output.\n\"\"\"\n\nimport logging\nimport shutil\nimport os\nimport threading\nimport time\nfrom random import getrandbits\nfrom collections import namedtuple\nimport tempfile\n\nimport numpy as np\n\nfrom ... import ir_pass, build, build_config, nd, TVMError, register_func, \\\n rpc as _rpc, target as _target\nfrom ...contrib import nvcc, ndk\n\nfrom ..util import get_const_tuple\nfrom ..env import AutotvmGlobalScope\nfrom ..task.space import InstantiationError\n\nfrom .measure import MeasureResult, MeasureErrorNo, Builder, Runner\nfrom .local_executor import LocalExecutor\n\nlogger = logging.getLogger('autotvm')\n\nclass BuildResult(namedtuple(\"BuildResult\", ('filename', 'arg_info', 'error', 'time_cost'))):\n \"\"\"\n Stores all the necessary inputs for a measurement.\n\n Parameters\n ----------\n filename : str\n The filename of generated library\n arg_info : Tuple\n The shape and dtype information of tvm tensor arguments\n error : Exception\n The error happens during compilation.\n time_cost : float\n The time cost of building\n \"\"\"\n\nclass LocalBuilder(Builder):\n \"\"\"Run compilation on local machine\n\n Parameters\n ----------\n timeout: float\n The timeout of a compilation\n n_parallel: int\n The number of tasks run in parallel. \"None\" will use all cpu cores\n build_func: callable or str\n If is 'default', use default build function\n If is 'ndk', use function for android ndk\n If is callable, use it as custom build function\n \"\"\"\n def __init__(self, timeout=10, n_parallel=None, build_func='default'):\n super(LocalBuilder, self).__init__(timeout, n_parallel)\n\n if isinstance(build_func, str):\n if build_func == 'default':\n build_func = default_build_func\n elif build_func == 'ndk':\n build_func = android_ndk_build_func\n else:\n raise ValueError(\"Invalid build_func\" + build_func)\n\n self.build_func = build_func\n self.executor = LocalExecutor(timeout=timeout)\n self.tmp_dir = tempfile.mkdtemp()\n\n def build(self, measure_inputs):\n results = []\n\n shutil.rmtree(self.tmp_dir)\n self.tmp_dir = tempfile.mkdtemp()\n\n for i in range(0, len(measure_inputs), self.n_parallel):\n futures = []\n for inp in measure_inputs[i:i + self.n_parallel]:\n ret = self.executor.submit(self.build_func,\n inp,\n self.tmp_dir,\n **self.build_kwargs)\n futures.append(ret)\n\n for future in futures:\n res = future.get()\n\n if isinstance(res, Exception):\n # timeout or fleet error, return MeasureResult directly\n results.append(MeasureResult((res,), MeasureErrorNo.BUILD_TIMEOUT,\n self.timeout, time.time()))\n elif res.error is not None:\n # instantiation error\n if isinstance(res.error, InstantiationError):\n results.append(MeasureResult((res.error,),\n MeasureErrorNo.INSTANTIATION_ERROR,\n res.time_cost, time.time()))\n else:\n if \"InstantiationError\" in str(res.error):\n msg = str(res.error)\n try:\n msg = msg.split('\\n')[-2].split(\": \")[1]\n except Exception: # pylint: disable=broad-except\n pass\n results.append(MeasureResult((InstantiationError(msg),),\n MeasureErrorNo.INSTANTIATION_ERROR,\n res.time_cost, time.time()))\n else: # tvm error\n results.append(MeasureResult((res.error,),\n MeasureErrorNo.COMPILE_HOST,\n res.time_cost, time.time()))\n else:\n # return BuildResult\n 
results.append(res)\n\n return results\n\n\nclass RPCRunner(Runner):\n \"\"\"Run generated code on remove devices.\n This function will ask a RPC Tracker to get device for measurement.\n\n Parameters\n ----------\n timeout: float\n The timeout of a compilation\n n_parallel: int\n The number of tasks run in parallel. \"None\" will use all cpu cores\n key: str\n The key of the device registered in the tracker\n host: str\n The host address of RPC Tracker\n port: int\n The port of RPC Tracker\n number : int, optional\n Number of times to do measurement for tasking average\n repeat : int, optional\n Number of times to repeat the measurement.\n In total, the generated code will be run (1 + number x repeat) times,\n where the first one is warm up. The returned result contains `repeat` costs,\n min_repeat_ms : float, optional\n Minimum duration of a timer measurement in milliseconds.\n When the run time of a measurement trial falls below this time, the\n `number` parameter will be automatically increased.\n Set this to improve the accuracy of perf measurement, e.g., when timers\n are not precise enough to capture short-running tasks. This parameter is\n also critical when devices need a certain minimum running time to \"warm\n up,\" such as GPUs that need time to reach a performance power state.\n cooldown_interval: float, optional\n The cool down interval between two measurements.\n check_correctness: bool, optional\n Whether check correctness after measurement. This will use llvm cpu target to\n call your template and get the reference output.\n This can work for TOPI templates, but may not work for your custom template.\n \"\"\"\n def __init__(self,\n key, host, port, priority=1,\n timeout=10, n_parallel=None,\n number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,\n check_correctness=False):\n super(RPCRunner, self).__init__(timeout, n_parallel)\n\n self.key = key\n self.host = host\n self.port = port\n self.priority = priority\n self.timeout = timeout\n\n self.number = number\n self.repeat = repeat\n self.min_repeat_ms = min_repeat_ms\n self.cur_number = number\n\n self.ref_input = None\n self.ref_output = None\n self.check_correctness = check_correctness\n self.cooldown_interval = cooldown_interval\n\n self.executor = LocalExecutor()\n\n def set_task(self, task):\n self.task = task\n self.cur_number = self.number\n\n if check_remote(task.target, self.key, self.host, self.port):\n logger.info(\"Get devices for measurement successfully!\")\n else:\n raise RuntimeError(\"Cannot get remote devices from the tracker. 
\"\n \"Please check the status of tracker by \"\n \"'python -m tvm.exec.query_rpc_tracker --port [THE PORT YOU USE]' \"\n \"and make sure you have free devices on the queue status.\")\n\n if self.check_correctness:\n # use llvm cpu to generate a reference input/output\n # this option works for tuning topi, but might not work for you custom op\n with _target.create(\"llvm\"):\n s, arg_bufs = task.instantiate(task.config_space.get(0))\n self.ref_input = [np.random.uniform(size=get_const_tuple(x.shape)).astype(x.dtype)\n for x in arg_bufs]\n func = build(s, arg_bufs, \"llvm\")\n tvm_buf = [nd.array(x) for x in self.ref_input]\n func(*tvm_buf)\n self.ref_output = [x.asnumpy() for x in tvm_buf]\n\n def get_build_kwargs(self):\n kwargs = {}\n if 'cuda' in self.task.target.keys or 'opencl' in self.task.target.keys:\n remote = request_remote(self.key, self.host, self.port)\n ctx = remote.context(str(self.task.target), 0)\n max_dims = ctx.max_thread_dimensions\n kwargs['check_gpu'] = {\n 'max_shared_memory_per_block': ctx.max_shared_memory_per_block,\n 'max_threads_per_block': ctx.max_threads_per_block,\n 'max_thread_x': max_dims[0],\n 'max_thread_y': max_dims[1],\n 'max_thread_z': max_dims[2],\n }\n\n if 'cuda' in self.task.target.keys:\n kwargs[\"cuda_arch\"] = \"sm_\" + \"\".join(ctx.compute_version.split('.'))\n\n return kwargs\n\n def run(self, measure_inputs, build_results):\n results = []\n remote_args = (self.key, self.host, self.port, self.priority, self.timeout)\n\n for i in range(0, len(measure_inputs), self.n_parallel):\n futures = []\n for measure_inp, build_res in zip(measure_inputs[i:i+self.n_parallel],\n build_results[i:i+self.n_parallel]):\n ret = self.executor.submit(run_through_rpc,\n measure_inp,\n build_res,\n self.cur_number,\n self.repeat,\n self.cooldown_interval,\n remote_args,\n self.ref_input,\n self.ref_output)\n futures.append(ret)\n\n for future in futures:\n res = future.get()\n if isinstance(res, Exception): # executor error or timeout\n results.append(MeasureResult((str(res),), MeasureErrorNo.RUN_TIMEOUT,\n self.timeout, time.time()))\n else:\n results.append(res)\n\n # If some runs were too fast, do remeasure for them\n # to meet the requirement of `min_repeat_ms`\n remeasure = np.zeros((len(measure_inputs),), dtype=np.bool)\n pre_number = next_number = self.cur_number\n min_repeat_duration = self.min_repeat_ms / 1000.0\n for i, res in enumerate(results):\n if res.error_no == MeasureErrorNo.NO_ERROR:\n if np.mean(res.costs) * pre_number <= min_repeat_duration:\n next_number = max(next_number,\n int(np.ceil(min_repeat_duration / np.mean(res.costs))))\n remeasure[i] = True\n\n if pre_number != next_number:\n self.cur_number = next_number\n msg = \"increasing number to %d\" % self.cur_number\n logger.info(msg)\n\n re_measure_inputs = [x for i, x in enumerate(measure_inputs) if remeasure[i]]\n re_build_results = [x for i, x in enumerate(build_results) if remeasure[i]]\n re_res = self.run(re_measure_inputs, re_build_results)\n ct = 0\n for i, rerun in enumerate(remeasure):\n if rerun:\n results[i] = re_res[ct]\n ct += 1\n\n return results\n\nclass LocalRunner(RPCRunner):\n \"\"\"Run generated code on local devices.\n\n Parameters\n ----------\n timeout: float\n The timeout of a compilation\n number : int, optional\n Number of times to do measurement for tasking average\n repeat : int, optional\n Number of times to repeat the measurement.\n In total, the generated code will be run (1 + number x repeat) times,\n where the first one is warm up. 
The returned result contains `repeat` costs,\n each of which is the average of `number` test run.\n min_repeat_ms : float, optional\n Minimum duration of a timer measurement in milliseconds.\n When the run time of a measurement trial falls below this time, the\n `number` parameter will be automatically increased.\n Set this to improve the accuracy of perf measurement, e.g., when timers\n are not precise enough to capture short-running tasks. This parameter is\n also critical when devices need a certain minimum running time to \"warm\n up,\" such as GPUs that need time to reach a performance power state.\n cooldown_interval: float, optional\n The cool down interval between two measurements.\n check_correctness: bool, optional\n Whether check correctness after measurement. This will use llvm cpu target to\n call your template and get the reference output.\n This can work for TOPI templates, but may not work for your custom template.\n\n Note\n ----\n This is a \"fake\" local mode. We start a silent rpc tracker and rpc server\n for the user. In this way we reuse timeout/isolation mechanism in RPC infrastructure.\n \"\"\"\n def __init__(self,\n timeout=10,\n number=4, repeat=3, min_repeat_ms=0, cooldown_interval=0.1,\n check_correctness=False):\n super(LocalRunner, self).__init__('', None, None, 0,\n timeout=timeout, n_parallel=1,\n number=number, repeat=repeat,\n min_repeat_ms=min_repeat_ms,\n cooldown_interval=cooldown_interval,\n check_correctness=check_correctness)\n self.tracker = None\n self.server = None\n\n def set_task(self, task):\n self.task = task\n\n from ...rpc.tracker import Tracker\n from ...rpc.server import Server\n\n tracker = Tracker('localhost', port=9000, port_end=10000, silent=True)\n device_key = '$local$device$%d' % tracker.port\n server = Server('localhost', port=9000, port_end=10000,\n key=device_key,\n use_popen=True, silent=True,\n tracker_addr=(tracker.host, tracker.port))\n self.key = device_key\n self.host = tracker.host\n self.port = tracker.port\n\n super(LocalRunner, self).set_task(task)\n return server, tracker\n\n\ndef _build_func_common(measure_input, check_gpu=None, cuda_arch=None, build_option=None):\n \"\"\"Common part for building a configuration\"\"\"\n target, task, config = measure_input\n\n with target:\n s, args = task.instantiate(config)\n\n # check invalidity of template and code hash consistency\n if not config.valid():\n raise InstantiationError(config.errors)\n\n opts = build_option or {}\n if check_gpu: # Add verify pass to filter out invalid configs in advance.\n opts[\"add_lower_pass\"] = [(2, gpu_verify_pass(**check_gpu))]\n if cuda_arch:\n set_cuda_target_arch(cuda_arch)\n\n with build_config(**opts):\n func = build(s, args, target_host=task.target_host)\n return func, tuple((get_const_tuple(x.shape), x.dtype) for x in args)\n\n\ndef default_build_func(measure_input, tmp_dir, **kwargs):\n \"\"\"\n Default build func. 
This can work for cuda, opencl, llvm backend\n\n Parameters\n ----------\n measure_input: MeasureInput\n The input of measurement\n tmp_dir: str\n The path of temporary directory to export generated library\n \"\"\"\n tic = time.time()\n try:\n filename = os.path.join(tmp_dir, \"tmp_func_%0x.tar\" % getrandbits(64))\n func, arg_info = _build_func_common(measure_input, **kwargs)\n func.export_library(filename)\n except Exception as e: # pylint: disable=broad-except\n return BuildResult(None, None, e, time.time() - tic)\n return BuildResult(filename, arg_info, None, time.time() - tic)\n\n\ndef android_ndk_build_func(measure_input, tmp_dir, **kwargs):\n \"\"\"\n Build function for android device using ndk.\n\n Parameters\n ----------\n measure_input: MeasureInput\n The input of measurement\n tmp_dir: str\n The path of temporary directory to export generated library\n \"\"\"\n tic = time.time()\n try:\n filename = os.path.join(tmp_dir, \"tmp_func_%0x.so\" % getrandbits(64))\n func, arg_info = _build_func_common(measure_input, **kwargs)\n func.export_library(filename, ndk.create_shared)\n except Exception as e: # pylint: disable=broad-except\n return BuildResult(None, None, e, time.time() - tic)\n return BuildResult(filename, arg_info, None, time.time() - tic)\n\n\ndef run_through_rpc(measure_input, build_result,\n number, repeat, cooldown_interval,\n remote_args, ref_input=None, ref_output=None):\n \"\"\"Run a generated library through rpc\n\n Parameters\n ----------\n measure_input: MeasureInput\n The raw measure input\n build_result: BuildResult\n The result returned from Builder. This contains the path to the generated library.\n number : int, optional\n Number of times to do measurement for tasking average\n repeat : int, optional\n Number of times to repeat the measurement.\n In total, the generated code will be run (1 + number x repeat) times,\n where the first one is warm up. 
The returned result contains `repeat` costs,\n each of which is the average of `number` test run.\n cooldown_interval: float\n The cool down interval between two measurements\n remote_args: Tuple\n The argument for request_remote\n ref_input: List of np.ndarray\n The reference input used for checking correctness\n ref_output: List of np.ndarray\n The reference output used for checking correctness\n \"\"\"\n if isinstance(build_result, MeasureResult):\n return build_result\n\n tic = time.time()\n errno = MeasureErrorNo.NO_ERROR\n try:\n # upload built module\n remote = request_remote(*remote_args)\n remote.upload(build_result.filename)\n func = remote.load_module(os.path.split(build_result.filename)[1])\n ctx = remote.context(str(measure_input.target), 0)\n time_f = func.time_evaluator(\n func.entry_name, ctx, number=number, repeat=repeat)\n\n # set input\n if ref_input:\n args = [nd.array(x, ctx=ctx) for x in ref_input]\n else:\n # create empty arrays on the remote device and copy them once.\n # This can avoid some memory issues that make the measurment results unreliable.\n args = [nd.empty(x[0], dtype=x[1], ctx=ctx) for x in build_result.arg_info]\n args = [nd.array(x, ctx=ctx) for x in args]\n ctx.sync()\n\n costs = time_f(*args).results\n\n # clean up remote files\n remote.remove(build_result.filename)\n remote.remove(os.path.splitext(build_result.filename)[0] + '.so')\n remote.remove('')\n\n if len(costs) > 2: # remove largest and smallest value to reduce variance\n costs = list(costs)\n costs.sort()\n costs = tuple(costs[1:-1])\n\n # check correctness of output\n if ref_output:\n for expected, real in zip(ref_output, args):\n if not np.allclose(expected, real.asnumpy(), rtol=1e-4):\n logger.warning(\"Wrong Answer!\")\n errno = MeasureErrorNo.WRONG_ANSWER\n except TVMError as exc:\n msg = str(exc)\n if \"Stack trace returned\" in msg:\n msg = msg[:msg.index(\"Stack trace returned\")]\n if \"CUDA Source\" in msg:\n msg = msg[:msg.index(\"CUDA Source\")]\n costs = (RuntimeError(msg[:1024]),)\n errno = MeasureErrorNo.RUNTIME_DEVICE\n tstamp = time.time()\n time.sleep(cooldown_interval)\n return MeasureResult(costs, errno, tstamp - tic + build_result.time_cost, tstamp)\n\n\ndef request_remote(device_key, host=None, port=None, priority=1, timeout=60):\n \"\"\"Request a remote session\n\n Parameters\n ----------\n device_key: string\n The device key of registered device in tracker\n host: host, optional\n The host address of rpc tracker.\n If is none, will use environment variable \"TVM_TRACKER_HOST\"\n port: int, optional\n The port of rpc tracker.\n If is none, will use environment variable \"TVM_TRACKER_PORT\"\n priority: int, optional\n The priority of this request, larger is more prior\n timeout: float, optional\n The timeout of this session (units: second)\n\n Returns\n ------\n session: RPCSession\n \"\"\"\n # connect to the tracker\n host = host or os.environ['TVM_TRACKER_HOST']\n port = port or int(os.environ['TVM_TRACKER_PORT'])\n\n tracker = _rpc.connect_tracker(host, port)\n remote = tracker.request(device_key, priority=priority,\n session_timeout=timeout)\n return remote\n\n\ndef check_remote(target, device_key, host=None, port=None, priority=100, timeout=10):\n \"\"\"\n Check the availability of a remote device\n\n Parameters\n ----------\n target: Target\n The wanted compilation target\n device_key: string\n device key of registered device in tracker\n host: host, optional\n The host address of rpc tracker.\n If is none, will use environment variable \"TVM_TRACKER_HOST\"\n 
port: int, optional\n The port address of rpc tracker.\n If is none, will use environment variable \"TVM_TRACKER_PORT\"\n priority: int, optional\n The priority of this request, larger is more prior\n timeout: float, optional\n The timeout of this check (units: seconds).\n\n Returns\n -------\n available: bool\n True if can find available device\n \"\"\"\n def _check():\n remote = request_remote(device_key, host, port, priority)\n ctx = remote.context(str(target))\n while not ctx.exist: # wait until we get an available device\n pass\n t = threading.Thread(target=_check,)\n t.start()\n t.join(timeout)\n return not t.is_alive()\n\n\n@register_func\ndef tvm_callback_cuda_compile(code):\n \"\"\"use nvcc to generate ptx code for better optimization\"\"\"\n ptx = nvcc.compile_cuda(code, target=\"ptx\", arch=AutotvmGlobalScope.current.cuda_target_arch)\n return ptx\n\n\ndef set_cuda_target_arch(arch):\n \"\"\"set target architecture of nvcc compiler\n\n Parameters\n ----------\n arch: str\n The argument of nvcc -arch. (e.g. \"sm_51\", \"sm_62\")\n \"\"\"\n AutotvmGlobalScope.current.cuda_target_arch = arch\n\n\ndef gpu_verify_pass(**kwargs):\n \"\"\"Verify the validity of a gpu kernel.\n This pass will check memory usage and number of threads per block.\n \"\"\"\n def verify_pass(stmt):\n valid = ir_pass.VerifyGPUCode(stmt, kwargs)\n if not valid:\n raise InstantiationError(\"Skipped because of invalid gpu kernel\")\n return stmt\n return verify_pass\n"
] | [
[
"numpy.random.uniform"
],
[
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
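In the tvm record, `RPCRunner.run` adaptively raises `number` until one timed batch lasts at least `min_repeat_ms`, then re-measures the too-fast configs. The core of that policy extracted into a self-contained sketch (function name and signature are illustrative, not the tvm API):

import math

def next_measure_number(cur_number, mean_cost_s, min_repeat_ms):
    # Mirrors the remeasure rule in RPCRunner.run: if the measured batch
    # (mean cost per run x current number of runs) is shorter than
    # min_repeat_ms, grow `number` so the next batch meets the minimum.
    min_repeat_s = min_repeat_ms / 1000.0
    if mean_cost_s * cur_number > min_repeat_s:
        return cur_number                      # batch already long enough
    return max(cur_number, int(math.ceil(min_repeat_s / mean_cost_s)))

# 4 runs at 0.1 ms each is far below a 100 ms floor, so number grows to 1000:
assert next_measure_number(4, 0.0001, 100) == 1000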
thunderball7cd/night-to-day-image-processing | [
"388a082241af4bc67e770c76f207b58b330063d9"
] | [
"src/denoising/torch_utils.py"
] | [
"\"\"\"\n## CycleISP: Real Image Restoration Via Improved Data Synthesis\n## Syed Waqas Zamir, Aditya Arora, Salman Khan, Munawar Hayat, Fahad Shahbaz Khan, Ming-Hsuan Yang, and Ling Shao\n## CVPR 2020\n## https://arxiv.org/abs/2003.07761\n\"\"\"\n\nfrom collections import OrderedDict\n\nimport torch\n\n\ndef load_checkpoint(model, weights):\n checkpoint = torch.load(weights, map_location='cpu')\n try:\n model.load_state_dict(checkpoint[\"state_dict\"])\n except:\n state_dict = checkpoint[\"state_dict\"]\n new_state_dict = OrderedDict()\n for k, v in state_dict.items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n model.load_state_dict(new_state_dict)"
] | [
[
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
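The `load_checkpoint` helper above falls back to stripping the `module.` prefix that `torch.nn.DataParallel` prepends to every parameter name when saving. The same rewrite as a standalone, torch-free sketch, with a `startswith` guard added (an assumption beyond the original, which slices every key unconditionally) so un-prefixed keys pass through unchanged:

from collections import OrderedDict

def strip_data_parallel_prefix(state_dict):
    # DataParallel saves weights as 'module.layer.weight'; a bare model
    # expects 'layer.weight', so drop the 7-character prefix where present.
    new_state_dict = OrderedDict()
    for k, v in state_dict.items():
        new_state_dict[k[7:] if k.startswith('module.') else k] = v
    return new_state_dict

sd = OrderedDict([('module.conv.weight', 1), ('module.conv.bias', 2)])
assert list(strip_data_parallel_prefix(sd)) == ['conv.weight', 'conv.bias']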
wakataw/ipython-dawet-sql | [
"af17db523bfcee236e2bae7cc36995e5b41f6c36"
] | [
"dawetsql/odbc_sql.py"
] | [
"import logging\nimport pypyodbc\nimport sys\n\nfrom pandas import DataFrame, read_sql, concat\nfrom IPython.core import magic_arguments\nfrom IPython.core.magic import magics_class, Magics, line_magic, cell_magic\nfrom dawetsql.widgets import SchemaExplorer\nfrom . import utils\nfrom cryptography.fernet import Fernet\n\n\n@magics_class\nclass OdbcSqlMagics(Magics):\n conn = None\n chunksize = 500\n reconnect = False\n max_retry = 3\n retry = 0\n __user = None\n __password = None\n __dsn = None\n __conn_string = None\n\n def __init__(self, *args, **kwargs):\n super(OdbcSqlMagics, self).__init__(*args, **kwargs)\n\n def __connect(self, dsn, username, password, connection_string, verbose=True):\n \"\"\"\n Open database connection\n :param dsn: ODBC DSN\n :return:\n \"\"\"\n try:\n if connection_string:\n self.conn = pypyodbc.connect(connection_string)\n else:\n self.conn = pypyodbc.connect(\"DSN={};Username={};Password={}\".format(dsn, username, password))\n if self.conn and verbose:\n print(\"Connected to {}\".format(dsn))\n except Exception as e:\n logging.error(e)\n return\n\n @line_magic('dawetsql')\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-u', '--user', type=str, help=\"Dawet User\")\n @magic_arguments.argument('-p', '--password', type=str, help=\"Dawet Password\")\n @magic_arguments.argument('-d', '--dsn', type=str, help=\"Dawet DSN\")\n @magic_arguments.argument('-x', '--connection', type=str, help=\"ODBC Connection String\")\n @magic_arguments.argument('-c', '--chunksize', type=int, default=100, help=\"ODBC Fetch size\")\n @magic_arguments.argument('-a', '--reconnect', action='store_true', help='Auto Reconnect')\n @magic_arguments.argument('-r', '--retry', type=int, default=3, help='Max Retry')\n def odbc_connect(self, arg):\n \"\"\"\n Open Database Connection line magic method\n :param arg: ODBC DSN\n :return:\n \"\"\"\n if self.conn:\n self.odbc_disconnect()\n\n args = magic_arguments.parse_argstring(self.odbc_connect, arg)\n\n self.chunksize = args.chunksize\n self.max_retry = args.retry\n\n if args.reconnect:\n self.reconnect = True\n self.chipper = self.generate_chipper()\n self.__dsn = args.dsn\n self.__user = args.user\n \n if args.password:\n self.__password = self.chipper.encrypt(args.password.encode('utf8'))\n\n if args.connection:\n self.__conn_string = self.chipper.encrypt(args.connection.encode('utf8'))\n else:\n self.__conn_string = False\n\n return self.__connect(args.dsn, args.user, args.password, args.connection)\n\n @line_magic('dawetsqlclose')\n def odbc_disconnect(self, *args, **kwargs):\n \"\"\"\n Close Database Connection line magic method\n :return:\n \"\"\"\n try:\n self.conn.close()\n print(\"Disconnected\")\n except:\n pass\n finally:\n self.conn = None\n return\n\n @line_magic('dawetsqlreconnect')\n def odbc_reconnect(self, args=None, cell=None):\n if not self.reconnect:\n logging.error(\"You did not use reconnect arguments, try re initialize dawetsql with -a/--reconnect argument\")\n return\n\n self.odbc_disconnect()\n\n if self.__conn_string:\n connection_string = self.chipper.decrypt(self.__conn_string).decode('utf8')\n else:\n connection_string = False\n \n if self.__password:\n password = self.chipper.decrypt(self.__password).decode('utf8')\n else:\n password = None\n\n return self.__connect(self.__dsn, self.__user, password, connection_string, verbose=False)\n\n @cell_magic('dawetsql')\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-l', '--limit', type=int, default=10, help=\"Set result limit\")\n 
@magic_arguments.argument('-o', '--ouput', default='_', type=str, help=\"File or Variable name for results data\")\n def odbc_sql(self, arg, cell=None):\n \"\"\"\n Run SQL Query\n :param arg: optional argument\n :param cell: SQL Query string\n :return:\n \"\"\"\n args = magic_arguments.parse_argstring(self.odbc_sql, arg)\n varname = args.ouput.strip()\n\n ok, valid_name = utils.validate_name(varname)\n query = ' '.join(cell.strip().split())\n\n if not ok:\n logging.error(\"Cannot proceed with `{}` as output name\".format(varname))\n return\n\n if not self.conn:\n logging.error(\n \"Please open connection first using %dawetsql line magic\")\n return\n\n if valid_name != '_':\n if valid_name.lower().endswith('.csv'):\n self.to_csv(query, valid_name)\n return\n elif valid_name.lower().endswith('.pkl'):\n self.to_pickle(query, valid_name)\n return\n else:\n self.to_dataframe(query, valid_name, download=True)\n return\n\n return self.to_dataframe(utils.limit_query(query, args.limit), valid_name)\n\n def download(self, query):\n utils.log_query(self.__user, query)\n data = None\n try:\n data = read_sql(query, self.conn, chunksize=self.chunksize)\n except Exception as e:\n logging.error(e.__class__.__name__)\n logging.error(e)\n\n if utils.teiid_resource_exception.findall(str(e)) and self.reconnect:\n if self.retry >= self.max_retry:\n self.retry = 0\n raise Exception('Max Retry Exception')\n\n self.retry += 1\n self.odbc_reconnect()\n return self.download(query)\n else:\n raise e\n\n return data\n\n def get_dataframe(self, query, verbose=True):\n \"\"\"\n Store query result to dataframe\n :param query: SQL Query\n :return: pandas dataframe\n :verbose: print process to stdout\n \"\"\"\n print(\"Fetching result\", flush=True) if verbose else None\n\n result = self.download(query)\n\n if result is None:\n return\n\n total = 0\n df_list = []\n\n for chunk in result:\n df_list.append(chunk)\n total += len(chunk)\n self.print_process(total) if verbose else None\n\n if df_list:\n df = concat(df_list, ignore_index=True)\n return df\n\n return DataFrame()\n\n def to_csv(self, query, filename):\n \"\"\"\n Export query result to csv\n :param query: SQL Query\n :param filename: csv filename\n :return:\n \"\"\"\n result = self.download(query)\n\n if result is None:\n return\n\n total = 0\n header = True\n\n for chunk in result:\n if header:\n mode = 'w'\n else:\n mode = 'a'\n chunk.to_csv(filename, index=False, mode=mode, header=header)\n total += len(chunk)\n self.print_process(total)\n header = False\n\n def to_dataframe(self, query, varname, download=False):\n \"\"\"\n Store dataframe to shell variable\n :param query: SQL query\n :param varname: Dataframe variable name\n :param download: Download or just preview query result\n :return:\n \"\"\"\n df = self.get_dataframe(query)\n\n if df is None:\n return\n\n self.shell.user_ns[varname] = df\n if not download:\n return df\n\n def to_pickle(self, query, pickle_name):\n \"\"\"\n Export query result to python pickle\n :param query: SQL Query\n :param pickle_name: pickle file name\n :return:\n \"\"\"\n df = self.get_dataframe(query)\n\n if df is None:\n return\n\n df.to_pickle(pickle_name)\n\n @line_magic('explorer')\n @magic_arguments.magic_arguments()\n @magic_arguments.argument('-f', '--force', action='store_true', help=\"Force explorer to re-index schema\")\n def explore_schema(self, arg):\n \"\"\"\n Display schema explorer widgets\n :return:\n \"\"\"\n args = magic_arguments.parse_argstring(self.explore_schema, arg)\n\n print('Fetching schema 
detail..')\n\n explorer = SchemaExplorer(self)\n explorer.show(force=args.force)\n\n @staticmethod\n def generate_chipper():\n return Fernet(Fernet.generate_key())\n\n @staticmethod\n def print_process(total):\n sys.stdout.write(\"\\rTotal {} row(s) downloaded\".format(total))\n sys.stdout.flush()\n\n def __del__(self):\n if self.conn:\n self.conn.close()\n\n self.conn = None\n"
] | [
[
"pandas.read_sql",
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Lumonk/CNNs.PyTorch | [
"d634fb63c86bc135e7be79102983045696bdaed4"
] | [
"modules/dfx_modules.py"
] | [
"from __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom .dfx import quant, quant_grad, quant_fb\n\n__all__ = ['QLinear', 'QConv2d', 'QBatchNorm2d']\n\nclass QLinear(nn.Linear):\n\n def __init__(self, in_features, out_features, bias=True, weight_copy=True):\n super(QLinear, self).__init__(in_features, out_features, bias=bias)\n\n\n # params define\n self.num_bits = 16\n self.add_noise = False \n\n self.weight_params = (self.num_bits, self.num_bits, self.add_noise)\n self.act_params = (self.num_bits, self.num_bits, self.add_noise)\n self.weight_copy = weight_copy\n\n def forward(self, input):\n\n\n if self.weight_copy:\n weight = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n bias = self.bias \n else:\n bias = None\n output = F.linear(input, weight, bias)\n else:\n self.weight.data = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n self.bias.data = quant_fb(self.bias, *(self.bias_params))\n else:\n self.bias = None\n\n output = F.linear(input, self.weight, self.bias)\n\n output = quant_fb(output, *(self.act_params))\n\n return output\n\n def extra_repr(self):\n s = 'DFX_Linear, in_features={in_features}, out_features={out_features}, '\n s += 'dfx = {{ {num_bits}, add_noise = {add_noise} }}'\n if self.bias is not None:\n s += ', bias=True'\n return s.format(**self.__dict__)\n\n\nclass QConv2d(nn.Conv2d):\n def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=False,\n weight_copy=True):\n super(QConv2d, self).__init__(in_channels, out_channels, kernel_size, stride=stride,\n padding=padding, dilation=dilation, groups=groups, bias=bias)\n # params define\n self.fw_bits = 8\n self.bw_bits = 8\n self.add_noise = False\n\n self.weight_params = (self.fw_bits, self.bw_bits, self.add_noise)\n self.act_params = (self.fw_bits, self.bw_bits, self.add_noise)\n self.weight_copy = weight_copy\n\n def forward(self, input):\n\n if self.weight_copy:\n weight = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n bias = self.bias\n else:\n bias = None\n output = F.conv2d(input, weight, bias, self.stride,\n self.padding, self.dilation, self.groups)\n else:\n self.weight.data = quant_fb(self.weight, *(self.weight_params))\n if self.bias is not None:\n self.bias.data = quant_fb(self.bias, *(self.bias_params))\n else:\n self.bias = None\n\n output = F.conv2d(input, self.weight, self.bias, self.stride,\n self.padding, self.dilation, self.groups)\n\n output = quant_fb(output, *(self.act_params))\n return output\n\n def extra_repr(self):\n s = ('DFX_CONV, {in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}, dfx = {{ {fw_bits}, {bw_bits}, add_noise={add_noise}}}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.output_padding != (0,) * len(self.output_padding):\n s += ', output_padding={output_padding}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n return s.format(**self.__dict__)\n\n\nclass QBatchNorm2d(nn.Module):\n\n def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True,\n track_running_stats=True, weight_copy=True):\n super(QBatchNorm2d, self).__init__()\n # quantization parameters:\n\n self.num_features = num_features\n self.eps = eps\n self.momentum = momentum\n self.affine = affine\n self.track_running_stats = 
track_running_stats\n\n # params define\n self.num_bits = 8\n self.add_noise = False\n self.weight_params = (self.num_bits, self.num_bits, self.add_noise)\n self.act_params = (self.num_bits, self.num_bits, self.add_noise)\n self.weight_copy = weight_copy\n\n\n if self.affine:\n self.bias = nn.Parameter(torch.Tensor(num_features))\n self.weight = nn.Parameter(torch.Tensor(num_features))\n else:\n self.register_parameter('weights', None)\n self.register_parameter('bias', None)\n\n if self.track_running_stats:\n self.register_buffer('running_mean', torch.zeros(num_features))\n self.register_buffer('running_var', torch.ones(num_features))\n self.register_buffer('num_batches_tracked', torch.tensor(0, dtype=torch.long))\n else:\n self.register_parameter('running_mean', None)\n self.register_parameter('running_var', None)\n self.register_buffer('num_batches_tracked', None)\n self.reset_parameters()\n\n\n def reset_running_stats(self):\n if self.track_running_stats:\n self.running_mean.zero_()\n self.running_var.fill_(1)\n self.num_batches_tracked.zero_()\n\n def reset_parameters(self):\n self.reset_running_stats()\n if self.affine:\n self.weight.data.uniform_()\n self.bias.data.zero_()\n\n def _check_input_dim(self, input):\n if input.dim() != 4:\n raise ValueError('expected 4D input (got {}D input)'\n .format(input.dim()))\n\n def forward(self, input):\n self._check_input_dim(input)\n\n if self.momentum is None:\n exponential_average_factor = 0.0\n else:\n exponential_average_factor = self.momentum\n\n if self.training and self.track_running_stats:\n # TODO: if statement only here to tell the jit to skip emitting this when it is None\n if self.num_batches_tracked is not None:\n self.num_batches_tracked += 1\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n\n if self.weight_copy:\n # weight = quant_fb(self.weight, *(self.weight_params))\n weight = self.weight\n bias = self.bias \n\n out = F.batch_norm(\n input, self.running_mean, self.running_var, weight, bias,\n self.training or not self.track_running_stats,\n exponential_average_factor, self.eps)\n else:\n\n self.weight.data = quant_fb(self.weight, *(self.weight_params))\n self.bias.data = quant_fb(self.bias, *(self.bias_params))\n\n out = F.batch_norm(\n input, self.running_mean, self.running_var, self.weight, self.bias,\n self.training or not self.track_running_stats,\n exponential_average_factor, self.eps)\n\n out = quant_fb(out, *(self.act_params))\n\n return out\n\n def extra_repr(self):\n s = 'DFX_BN, {num_features}, eps={eps}, momentum={momentum}, affine={affine}, ' \\\n 'track_running_stata={track_running_stats}, '\n s += ' dfx = {{ {num_bits}, add_noise={add_noise} }}'\n return s.format(**self.__dict__)\n\n def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs):\n version = local_metadata.get('version', None)\n\n if (version is None or version < 2) and self.track_running_stats:\n # at version 2: added num_batches_tracked buffer\n # this should have a default value of 0\n num_batches_tracked_key = prefix + 'num_batches_tracked'\n if num_batches_tracked_key not in state_dict:\n state_dict[num_batches_tracked_key] = torch.tensor(0, dtype=torch.long)\n\n super(QBatchNorm2d, self)._load_from_state_dict(\n state_dict, prefix, local_metadata, strict,\n missing_keys, unexpected_keys, error_msgs)\n\n"
] | [
[
"torch.nn.functional.batch_norm",
"torch.ones",
"torch.Tensor",
"torch.zeros",
"torch.nn.functional.conv2d",
"torch.tensor",
"torch.nn.functional.linear"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
VT-ASIM-LAB/autoware.ai | [
"211dff3bee2d2782cb10444272c5d98d1f30d33a",
"211dff3bee2d2782cb10444272c5d98d1f30d33a"
] | [
"jsk_recognition/jsk_recognition_utils/python/jsk_recognition_utils/depth.py",
"jsk_recognition/jsk_perception/node_scripts/vgg16_object_recognition.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom skimage.segmentation import slic\nfrom skimage.feature import peak_local_max\nfrom skimage.morphology import binary_closing\n\nfrom jsk_recognition_utils.mask import descent_closing\n\n\ndef split_fore_background(depth_img, footprint=None):\n if footprint is None:\n footprint = np.ones((3, 3))\n segments = slic(depth_img)\n\n local_maxi = peak_local_max(\n depth_img, labels=segments, footprint=footprint, indices=False)\n\n fg_mask = descent_closing(local_maxi, init_selem=np.ones((3, 3)), n_times=6)\n bg_mask = ~fg_mask\n return fg_mask, bg_mask\n",
"#!/usr/bin/env python\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n\nimport itertools, pkg_resources, sys\nfrom distutils.version import LooseVersion\nif LooseVersion(pkg_resources.get_distribution(\"chainer\").version) >= LooseVersion('7.0.0') and \\\n sys.version_info.major == 2:\n print('''Please install chainer <= 7.0.0:\n\n sudo pip install chainer==6.7.0\n\nc.f https://github.com/jsk-ros-pkg/jsk_recognition/pull/2485\n''', file=sys.stderr)\n sys.exit(1)\nif [p for p in list(itertools.chain(*[pkg_resources.find_distributions(_) for _ in sys.path])) if \"cupy-\" in p.project_name ] == []:\n print('''Please install CuPy\n\n sudo pip install cupy-cuda[your cuda version]\ni.e.\n sudo pip install cupy-cuda91\n\n''', file=sys.stderr)\n sys.exit(1)\nimport chainer\nfrom chainer import cuda\nimport chainer.serializers as S\nfrom chainer import Variable\nfrom distutils.version import LooseVersion\nimport numpy as np\nimport skimage.transform\n\nimport cv_bridge\nfrom jsk_recognition_msgs.msg import ClassificationResult\nfrom jsk_recognition_utils.chainermodels import VGG16\nfrom jsk_recognition_utils.chainermodels import VGG16BatchNormalization\nfrom jsk_topic_tools import ConnectionBasedTransport\nfrom jsk_topic_tools.log_utils import logerr_throttle\nimport message_filters\nimport rospy\nfrom sensor_msgs.msg import Image\n\n\nclass VGG16ObjectRecognition(ConnectionBasedTransport):\n\n mean_bgr = np.array([104.00698793, 116.66876762, 122.67891434])\n\n def __init__(self):\n super(self.__class__, self).__init__()\n self.insize = 224\n self.gpu = rospy.get_param('~gpu', -1)\n self.target_names = rospy.get_param('~target_names')\n self.model_name = rospy.get_param('~model_name')\n if self.model_name == 'vgg16':\n self.model = VGG16(n_class=len(self.target_names))\n elif self.model_name == 'vgg16_batch_normalization':\n self.model = VGG16BatchNormalization(\n n_class=len(self.target_names))\n else:\n rospy.logerr('Unsupported ~model_name: {0}'\n .format(self.model_name))\n model_file = rospy.get_param('~model_file')\n S.load_hdf5(model_file, self.model)\n if self.gpu != -1:\n self.model.to_gpu(self.gpu)\n self.pub = self.advertise('~output', ClassificationResult,\n queue_size=1)\n self.pub_input = self.advertise(\n '~debug/net_input', Image, queue_size=1)\n\n def subscribe(self):\n if rospy.get_param('~use_mask', False):\n # larger buff_size is necessary for taking time callback\n # http://stackoverflow.com/questions/26415699/ros-subscriber-not-up-to-date/29160379#29160379 # NOQA\n sub = message_filters.Subscriber(\n '~input', Image, queue_size=1, buff_size=2**24)\n sub_mask = message_filters.Subscriber(\n '~input/mask', Image, queue_size=1, buff_size=2**24)\n self.subs = [sub, sub_mask]\n queue_size = rospy.get_param('~queue_size', 10)\n if rospy.get_param('~approximate_sync', False):\n slop = rospy.get_param('~slop', 0.1)\n sync = message_filters.ApproximateTimeSynchronizer(\n self.subs, queue_size=queue_size, slop=slop)\n else:\n sync = message_filters.TimeSynchronizer(\n self.subs, queue_size=queue_size)\n sync.registerCallback(self._recognize)\n else:\n sub = rospy.Subscriber(\n '~input', Image, self._recognize, callback_args=None,\n queue_size=1, buff_size=2**24)\n self.subs = [sub]\n\n def unsubscribe(self):\n for sub in self.subs:\n sub.unregister()\n\n def _recognize(self, imgmsg, mask_msg=None):\n bridge = cv_bridge.CvBridge()\n bgr = bridge.imgmsg_to_cv2(imgmsg, desired_encoding='bgr8')\n if mask_msg is not 
None:\n mask = bridge.imgmsg_to_cv2(mask_msg)\n if mask.shape != bgr.shape[:2]:\n logerr_throttle(10,\n 'Size of input image and mask is different')\n return\n elif mask.size == 0:\n logerr_throttle(10, 'Size of input mask is 0')\n return\n bgr[mask == 0] = self.mean_bgr\n bgr = skimage.transform.resize(\n bgr, (self.insize, self.insize), preserve_range=True)\n input_msg = bridge.cv2_to_imgmsg(bgr.astype(np.uint8), encoding='bgr8')\n input_msg.header = imgmsg.header\n self.pub_input.publish(input_msg)\n\n blob = (bgr - self.mean_bgr).transpose((2, 0, 1))\n x_data = np.array([blob], dtype=np.float32)\n if self.gpu != -1:\n x_data = cuda.to_gpu(x_data, device=self.gpu)\n if LooseVersion(chainer.__version__) < LooseVersion('2.0.0'):\n x = Variable(x_data, volatile=True)\n self.model.train = False\n self.model(x)\n else:\n with chainer.using_config('train', False), \\\n chainer.no_backprop_mode():\n x = Variable(x_data)\n self.model(x)\n\n proba = cuda.to_cpu(self.model.pred.data)[0]\n label = np.argmax(proba)\n label_name = self.target_names[label]\n label_proba = proba[label]\n cls_msg = ClassificationResult(\n header=imgmsg.header,\n labels=[label],\n label_names=[label_name],\n label_proba=[label_proba],\n probabilities=proba,\n classifier=self.model_name,\n target_names=self.target_names,\n )\n self.pub.publish(cls_msg)\n\n\nif __name__ == '__main__':\n rospy.init_node('vgg16_object_recognition')\n app = VGG16ObjectRecognition()\n rospy.spin()\n"
] | [
[
"numpy.ones"
],
[
"numpy.array",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
565353780/railway-fault-detect | [
"56c5df835d21efeb4e09111282d251c80eaa6ca0"
] | [
"src/Python/lapnet/test_line.py"
] | [
"import torch\nimport numpy as np\nimport os\n\nimport cv2\n\nfrom LapNet import LAPNet\nfrom create_dataset import createDataset\nfrom torch.nn import DataParallel\nfrom collections import OrderedDict\nfrom torch.nn.parameter import Parameter\nimport json\nimport base64\nimport numpy as np\n\nfrom flask import Flask, request, Response\napp = Flask(__name__)\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\nModelName = \"LapNet_chkpt_better_epoch3978_GPU0_line.pth\"\n# ModelName = \"LapNet_chkpt_better_epoch1890_GPU0_chLi_line.pth\"\nDetectMode = \"line\"\nPort = \"9360\"\n\nclass LapNet_Test:\n def __init__(self, model_name, detect_mode):\n # torch.cuda.set_device(args.gpu_idx)\n torch.cuda.set_device(0)\n\n # self.INPUT_CHANNELS = 3\n # self.OUTPUT_CHANNELS = 2\n # self.LEARNING_RATE = args.lr #1e-5\n # self.BATCH_SIZE = args.batch_size #20\n # self.NUM_EPOCHS = args.epoch #100\n # self.LOG_INTERVAL = 20\n # self.INS_CH = 32\n # self.SIZE = [args.img_size[0], args.img_size[1]] #[224, 224]\n # self.NUM_WORKERS = args.num_workers #20\n\n self.INPUT_CHANNELS = 3\n self.OUTPUT_CHANNELS = 2\n self.LEARNING_RATE = 3e-4\n self.BATCH_SIZE = 32\n self.NUM_EPOCHS = 10000000000000\n self.LOG_INTERVAL = 20\n self.INS_CH = 32\n self.SIZE = [1024,512]\n self.NUM_WORKERS = 32\n\n self.model_name = model_name\n self.detect_mode = detect_mode\n\n self.root_path = '../../../thirdparty/lapnet-gpu'\n\n self.model = LAPNet(input_ch=self.INPUT_CHANNELS, output_ch=self.OUTPUT_CHANNELS,internal_ch = 8).cuda()\n self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.LEARNING_RATE, betas=(0.9, 0.99), amsgrad=True)\n\n chkpt_filename = self.root_path + '/trained_model/'+ self.model_name\n\n if not os.path.exists(self.root_path + '/trained_model'):\n os.mkdir(self.root_path + '/trained_model')\n if os.path.isfile(chkpt_filename):\n checkpoint = torch.load(chkpt_filename)\n self.start_epoch = checkpoint['epoch']\n self.optimizer.load_state_dict(checkpoint['optimizer'])\n self.model.load_state_dict(checkpoint['net'])\n self.load_state_dict(self.model, self.state_dict(self.model))\n\n def state_dict(self, model, destination=None, prefix='', keep_vars=False):\n own_state = model.module if isinstance(model, torch.nn.DataParallel) \\\n else model\n if destination is None:\n destination = OrderedDict()\n for name, param in own_state._parameters.items():\n if param is not None:\n destination[prefix + name] = param if keep_vars else param.data\n for name, buf in own_state._buffers.items():\n if buf is not None:\n destination[prefix + name] = buf\n for name, module in own_state._modules.items():\n if module is not None:\n self.state_dict(module, destination, prefix + name + '.', keep_vars=keep_vars)\n return destination\n\n def load_state_dict(self, model, state_dict, strict=True):\n own_state = model.module.state_dict() if isinstance(model, torch.nn.DataParallel) \\\n else model.state_dict()\n for name, param in state_dict.items():\n if name in own_state:\n if isinstance(param, Parameter):\n # backwards compatibility for serialized parameters\n param = param.data\n try:\n own_state[name].copy_(param)\n except Exception:\n raise RuntimeError('While copying the parameter named {}, '\n 'whose dimensions in the model are {} and '\n 'whose dimensions in the checkpoint are {}.'\n .format(name, own_state[name].size(), param.size()))\n elif strict:\n raise KeyError('unexpected key \"{}\" in state_dict'\n .format(name))\n if strict:\n missing = set(own_state.keys()) - set(state_dict.keys())\n if len(missing) > 0:\n raise 
KeyError('missing keys in state_dict: \"{}\"'.format(missing))\n\nlapnet_test = LapNet_Test(ModelName, DetectMode)\nlapnet_test.model.eval()\n\[email protected](\"/predict\", methods=[\"POST\"])\ndef predict():\n data= request.get_data()\n data_json = json.loads(data)\n\n img_b64encode = bytes(data_json[\"Image\"], encoding=\"utf-8\")\n \n img_b64decode = base64.b64decode(img_b64encode)\n\n img_array = np.frombuffer(img_b64decode, np.uint8)\n image = cv2.imdecode(img_array, cv2.COLOR_BGR2RGB)\n\n train_dataset = createDataset(\"\", size=lapnet_test.SIZE, image=image)\n train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=24, pin_memory=True,\n shuffle=False, num_workers=0)\n \n img = list(enumerate(train_dataloader))[0][1]\n\n img_tensor = torch.tensor(img).cuda()\n\n sem_pred = lapnet_test.model(img_tensor)\n\n seg_map = torch.squeeze(sem_pred, 0).cpu().detach().numpy()\n\n seg_show = seg_map[1]\n\n _, seg_show2 = cv2.threshold(seg_show + 1, 0, 0, cv2.THRESH_TOZERO)\n seg_show2 = cv2.normalize(seg_show2, seg_show2, 0, 1, cv2.NORM_MINMAX)\n seg_show2 = cv2.convertScaleAbs(seg_show2, seg_show2, 255)\n result_img = cv2.applyColorMap(seg_show2, cv2.COLORMAP_MAGMA)\n\n output_img_array = cv2.imencode(\".jpg\", result_img)[1]\n\n output_img_b64encode = str(base64.b64encode(output_img_array))[2:-1]\n\n image_output_json = {}\n\n image_output_json[\"OutputImage\"] = output_img_b64encode\n\n return image_output_json\n\nif __name__ == \"__main__\":\n app.run(host=\"0.0.0.0\", port=Port,debug=True)\n"
] | [
[
"torch.cuda.set_device",
"torch.load",
"torch.utils.data.DataLoader",
"torch.tensor",
"numpy.frombuffer",
"torch.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
undeadinu/dldt | [
"fbc7a4a710c24def8ab199926a7da90a0394b87d",
"fbc7a4a710c24def8ab199926a7da90a0394b87d",
"fbc7a4a710c24def8ab199926a7da90a0394b87d"
] | [
"model-optimizer/mo/front/common/partial_infer/matmul.py",
"model-optimizer/extensions/middle/FusePermutesSequence.py",
"inference-engine/ie_bridges/python/sample/classification_sample.py"
] | [
"\"\"\"\n Copyright (c) 2018 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging as log\n\nimport numpy as np\n\nfrom mo.utils.error import Error\n\n\ndef tf_matmul_infer(node):\n assert (len(node.in_nodes()) == 2)\n shapes = [node.in_node(i).shape for i in range(2)]\n log.debug('matmul shapes: {}'.format(shapes))\n if node.transpose_a or node.transpose_b or any(s is None or len(s) < 2 for s in shapes):\n log.error(\"MatMul wasn't able to infer shape\")\n return\n if any(shapes[0][:-2] != shapes[1][:-2]) or shapes[0][-1] != shapes[1][-2]:\n log.error(\"MatMul wasn't able to infer shape because input dimensions are not compatible\")\n return\n if any(shapes[0][1:-1] != 1):\n log.error(\"MatMul wasn't able to infer shapes because input[0] shape is invalid: {}\".format(shapes[0]))\n return\n\n shape_tuple = (np.array([shapes[0][0]], dtype=np.int64), np.array([shapes[1][-1]], dtype=np.int64))\n if len(shapes[0]) > 2:\n # TODO Investigate case when MatMul have inputs with not matching output dimensions\n # It looks to be a practical case and if we add outer dimensions of the first argument\n # it will lead to incorrect model sometimes. TF documentation is unclear.\n log.warning('Ignored outer dimensions of input tensor for MatMul node: {}'.format(node.name))\n # shape_tuple = (shapes[0][:-2], *shape_tuple)\n\n log.debug('shape_tuple: {}'.format(shape_tuple))\n node.out_node().shape = np.concatenate(shape_tuple)\n node['channel_dims'] = node.out_node().shape.size - 1\n log.debug('matmul shape: {}'.format(node.out_node().shape))\n\n\n\ndef onnx_gemm_infer(node):\n assert (len(node.in_nodes()) == 3)\n shapeA = node.in_node(0).shape\n shapeB = node.in_node(1).shape\n shapeC = node.in_node(2).shape\n\n assert shapeA.size >= 2 and shapeB.size == 2 and shapeC.size in [1, 2]\n\n if shapeA.size > 2 and node.transpose_a:\n raise Error(\n 'ONNX Gemm operation do not support {}dimensional input with set transA key'.format(shapeA.size))\n\n # apply transposes and broadcasts\n if node.transpose_a:\n shapeA = shapeA[[1,0]]\n if node.transpose_b:\n shapeB = shapeB[[1,0]]\n if node.broadcast_c and shapeC.size == 1:\n shapeC = np.array([shapeA[0], shapeC[0]])\n\n node.out_node().shape = shapeC\n return\n\n",
"\"\"\"\n Copyright (c) 2018 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport networkx as nx\nimport numpy as np\n\nfrom extensions.middle.ConvertLayoutDependentOperations import ConvertLayoutDependentOperations\nfrom mo.graph.graph import Node\nfrom mo.middle.passes.eliminate import merge_data_nodes, graph_clean_up_tf\nfrom mo.middle.passes.fusing.helpers import get_next_operation\nfrom mo.middle.replacement import MiddleReplacementPattern\nfrom mo.utils.error import Error\n\n\nclass FusePermutesSequence(MiddleReplacementPattern):\n \"\"\"\n This pass finds sequence of Permute operations and merge them to single Permute operation\n In case if resulting Permutation do nothing, we just remove it\n \"\"\"\n\n enabled = True\n\n def run_after(self):\n return [ConvertLayoutDependentOperations]\n\n def find_and_replace_pattern(self, graph: nx.MultiDiGraph):\n for node in list(graph.nodes()):\n if node not in graph.nodes():\n continue\n permute_node = Node(graph, node)\n if permute_node.has_valid('type') and permute_node.type == 'Permute':\n list_of_permutes = [permute_node]\n # Get sequence of permutations\n node = permute_node\n while True:\n next_ops = get_next_operation(node)\n if len(next_ops) != 1:\n break\n\n next_op = next_ops[0]\n if next_op.has_valid('type') and next_op.type == 'Permute':\n list_of_permutes.append(next_op)\n node = next_op\n else:\n break\n\n final_permutation = np.array([x for x in range(len(list_of_permutes[0].order))], dtype=np.int64)\n for permute in list_of_permutes:\n if not permute.has_valid('order'):\n raise Error(\"Permute node {} has wrong attribute order = None\".format(permute.name))\n final_permutation = final_permutation[np.array(permute.order, dtype=np.int64)]\n\n if np.array_equal(final_permutation, [x for x in range(len(list_of_permutes[0].order))]):\n first_data_node, last_data_node = list_of_permutes[0].in_node(), list_of_permutes[-1].out_node()\n else:\n if len(list_of_permutes) < 2:\n continue\n first_data_node, last_data_node = list_of_permutes[0].out_node(), list_of_permutes[-1].out_node()\n list_of_permutes[0].order = final_permutation\n\n graph.remove_edge(first_data_node.id, first_data_node.out_node().id)\n graph.remove_edge(last_data_node.in_node().id, last_data_node.id)\n\n merge_data_nodes(graph, first_data_node, last_data_node)\n graph.remove_node(last_data_node.id)\n graph_clean_up_tf(graph)\n",
"#!/usr/bin/env python\n\"\"\"\n Copyright (c) 2018 Intel Corporation\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\nfrom __future__ import print_function\nimport sys\nimport os\nfrom argparse import ArgumentParser\nimport cv2\nimport numpy as np\nimport logging as log\nfrom time import time\nfrom openvino.inference_engine import IENetwork, IEPlugin\n\n\ndef build_argparser():\n parser = ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", help=\"Path to an .xml file with a trained model.\", required=True, type=str)\n parser.add_argument(\"-i\", \"--input\", help=\"Path to a folder with images or path to an image files\", required=True,\n type=str, nargs=\"+\")\n parser.add_argument(\"-l\", \"--cpu_extension\",\n help=\"MKLDNN (CPU)-targeted custom layers.Absolute path to a shared library with the kernels \"\n \"impl.\", type=str, default=None)\n parser.add_argument(\"-pp\", \"--plugin_dir\", help=\"Path to a plugin folder\", type=str, default=None)\n parser.add_argument(\"-d\", \"--device\",\n help=\"Specify the target device to infer on; CPU, GPU, FPGA or MYRIAD is acceptable. Sample \"\n \"will look for a suitable plugin for device specified (CPU by default)\", default=\"CPU\",\n type=str)\n parser.add_argument(\"--labels\", help=\"Labels mapping file\", default=None, type=str)\n parser.add_argument(\"-nt\", \"--number_top\", help=\"Number of top results\", default=10, type=int)\n parser.add_argument(\"-ni\", \"--number_iter\", help=\"Number of inference iterations\", default=1, type=int)\n parser.add_argument(\"-pc\", \"--perf_counts\", help=\"Report performance counters\", default=False, action=\"store_true\")\n\n return parser\n\n\ndef main():\n log.basicConfig(format=\"[ %(levelname)s ] %(message)s\", level=log.INFO, stream=sys.stdout)\n args = build_argparser().parse_args()\n model_xml = args.model\n model_bin = os.path.splitext(model_xml)[0] + \".bin\"\n\n # Plugin initialization for specified device and load extensions library if specified\n plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)\n if args.cpu_extension and 'CPU' in args.device:\n plugin.add_cpu_extension(args.cpu_extension)\n # Read IR\n log.info(\"Loading network files:\\n\\t{}\\n\\t{}\".format(model_xml, model_bin))\n net = IENetwork.from_ir(model=model_xml, weights=model_bin)\n\n if plugin.device == \"CPU\":\n supported_layers = plugin.get_supported_layers(net)\n not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]\n if len(not_supported_layers) != 0:\n log.error(\"Following layers are not supported by the plugin for specified device {}:\\n {}\".\n format(plugin.device, ', '.join(not_supported_layers)))\n log.error(\"Please try to specify cpu extensions library path in sample's command line parameters using -l \"\n \"or --cpu_extension command line argument\")\n sys.exit(1)\n\n assert len(net.inputs.keys()) == 1, \"Sample supports only single input topologies\"\n assert len(net.outputs) == 1, \"Sample supports only single output topologies\"\n\n 
log.info(\"Preparing input blobs\")\n input_blob = next(iter(net.inputs))\n out_blob = next(iter(net.outputs))\n net.batch_size = len(args.input)\n\n # Read and pre-process input images\n n, c, h, w = net.inputs[input_blob].shape\n images = np.ndarray(shape=(n, c, h, w))\n for i in range(n):\n image = cv2.imread(args.input[i])\n if image.shape[:-1] != (h, w):\n log.warning(\"Image {} is resized from {} to {}\".format(args.input[i], image.shape[:-1], (h, w)))\n image = cv2.resize(image, (w, h))\n image = image.transpose((2, 0, 1)) # Change data layout from HWC to CHW\n images[i] = image\n log.info(\"Batch size is {}\".format(n))\n\n # Loading model to the plugin\n log.info(\"Loading model to the plugin\")\n exec_net = plugin.load(network=net)\n del net\n\n # Start sync inference\n log.info(\"Starting inference ({} iterations)\".format(args.number_iter))\n infer_time = []\n for i in range(args.number_iter):\n t0 = time()\n res = exec_net.infer(inputs={input_blob: images})\n infer_time.append((time()-t0)*1000)\n log.info(\"Average running time of one iteration: {} ms\".format(np.average(np.asarray(infer_time))))\n if args.perf_counts:\n perf_counts = exec_net.requests[0].get_perf_counts()\n log.info(\"Performance counters:\")\n print(\"{:<70} {:<15} {:<15} {:<15} {:<10}\".format('name', 'layer_type', 'exet_type', 'status', 'real_time, us'))\n for layer, stats in perf_counts.items():\n print (\"{:<70} {:<15} {:<15} {:<15} {:<10}\".format(layer, stats['layer_type'], stats['exec_type'],\n stats['status'], stats['real_time']))\n\n # Processing output blob\n log.info(\"Processing output blob\")\n res = res[out_blob]\n log.info(\"Top {} results: \".format(args.number_top))\n if args.labels:\n with open(args.labels, 'r') as f:\n labels_map = [x.split(sep=' ', maxsplit=1)[-1].strip() for x in f]\n else:\n labels_map = None\n for i, probs in enumerate(res):\n probs = np.squeeze(probs)\n top_ind = np.argsort(probs)[-args.number_top:][::-1]\n print(\"Image {}\\n\".format(args.input[i]))\n for id in top_ind:\n det_label = labels_map[id] if labels_map else \"#{}\".format(id)\n print(\"{:.7f} label {}\".format(probs[id], det_label))\n print(\"\\n\")\n\n del exec_net\n del plugin\n\n\nif __name__ == '__main__':\n sys.exit(main() or 0)\n"
] | [
[
"numpy.concatenate",
"numpy.array"
],
[
"numpy.array"
],
[
"numpy.asarray",
"numpy.argsort",
"numpy.squeeze",
"numpy.ndarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lsoffi/EsperienzeDiLaboratorioDiCalcolo201920 | [
"7a2a821b37cc8dfca527e9afb639a86a8e6c759b",
"7a2a821b37cc8dfca527e9afb639a86a8e6c759b",
"7a2a821b37cc8dfca527e9afb639a86a8e6c759b"
] | [
"esercitazioni/ex3.py",
"esercitazioni201920-Livia/EX2/ex2_3.py",
"esercitazioni/ex6_1.py"
] | [
"#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport numpy as np\nplt.title('Un primo plot con Python')\nplt.xlabel('x')\nplt.ylabel('y')\nx = np.linspace(0.0, 5.0, 100)\ny = x\nplt.plot(x,y,label='y=x')\nx, y = np.loadtxt('temp.dat', usecols=(0,2), delimiter=' ', unpack=True)\nplt.plot(x,y, 'x',label='Loaded from file!')\nplt.savefig('traiettoria.png')\nplt.show()\n",
"#!/usr/bin/env python3\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nplt.title('Traiettoria')\nplt.xlabel('x')\nplt.ylabel('y')\nx, y = np.loadtxt('temp.dat', comments=['#'], usecols=(1,2), unpack=True)\nplt.plot(x, y, 'x',label='Traiettoria')\nplt.savefig('traiettoria.png')\nplt.show()\n\n# facciamo un reset della figura\nplt.clf()\n\nplt.title('x in funzione del tempo')\nplt.xlabel('t')\nplt.ylabel('x')\nt, x = np.loadtxt('temp.dat', comments=['#'], usecols=(0,1), unpack=True)\nplt.plot(t, x, 'x',label='x in funzione del tempo')\nplt.savefig('x.png')\nplt.show()\n\n# un altro reset della figura\nplt.clf()\n\nplt.title('y in funzione del tempo')\nplt.xlabel('t')\nplt.ylabel('y')\nt, y = np.loadtxt('temp.dat', comments=['#'], usecols=(0,2), unpack=True)\nplt.plot(t, y, 'x',label='y in funzione del tempo')\nplt.savefig('y.png')\nplt.show()\n",
"#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter)\nimport matplotlib.font_manager as font_manager\nfontp={'size':'18', 'color': 'blue'}\nplt.title('Distribuzione Binomiale',**fontp)\nax = plt.gca()\nax.xaxis.set_major_formatter(FormatStrFormatter('%.0f'))\nax.yaxis.set_minor_locator(MultipleLocator(0.01))\nax.xaxis.set_major_locator(MultipleLocator(2))\nax.xaxis.set_minor_locator(MultipleLocator(1))\nplt.xlabel('k',**fontp)\nplt.ylabel('P(k)',**fontp)\nx, y = np.loadtxt('bernoulli_20.dat', unpack=True)\nplt.plot(x,y, 'b-',label='bernoulli')\nplt.savefig('bernoulli_20.png')\nplt.show()\n"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
],
[
"matplotlib.pyplot.gca",
"matplotlib.ticker.MultipleLocator",
"matplotlib.pyplot.title",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.ticker.FormatStrFormatter",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Rockysed/PSC_classification | [
"1815b673ac9374d9d2abd08ba0f1f43597316dee",
"1815b673ac9374d9d2abd08ba0f1f43597316dee"
] | [
"code/plotting_psc.py",
"code/mean_var_bt.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 23 10:48:58 2019\r\n\r\n@author: rocco\r\n\"\"\"\r\nimport cartopy.crs as ccrs\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport os\r\n\r\n\"\"\"\r\nDefinition function to plot psc, df is a pandas dataframe, i = 0 if Northern Emisphere, i = 1 if Southern Emisphere\r\ntitle, classifier_type = [labels_bc, labels_svm_pc]\r\n\"\"\"\r\ndef plot_psc(df, i, title, classifier_type):\r\n if i == 1:\r\n ax = plt.axes(projection=ccrs.Orthographic(0, -90))\r\n else:\r\n ax = plt.axes(projection=ccrs.Orthographic(0, 90))\r\n ax.coastlines(resolution='10m')\r\n ax.gridlines() \r\n if classifier_type == \"labels_bc\":\r\n markers = ['s', 's', '^', 'o', 'D', 'v', '>']\r\n colors = ['w', 'b', 'r', 'chartreuse', 'cyan', 'goldenrod', 'steelblue']\r\n edges = ['k', 'b', 'r', 'chartreuse', 'cyan', 'goldenrod', 'steelblue']\r\n labels = ['unspec.', 'ICE', 'NAT', 'STSmix', 'ICE_NAT', \"NAT_STS\", \"ICE_STS\"]\r\n for j in range (7):\r\n plt.scatter(df[df[\"labels_bc\"]==j][\"lon\"] , df[df[\"labels_bc\"]==j][\"lat\"], c = colors[j], s=40 \\\r\n , marker=markers[j], transform=ccrs.Geodetic(), label=labels[j], edgecolors=edges[j])\r\n else:\r\n markers = ['s', '^', 'o', 'D', 'v']\r\n colors = ['b', 'r', 'chartreuse', 'chartreuse', 'chartreuse']\r\n labels = ['ICE', 'NAT', 'STS_1', 'STS_2', 'STS_3']\r\n for j in range (0, 5):\r\n plt.scatter(df[df[classifier_type]==j+1][\"lon\"] , df[df[classifier_type]==j+1][\"lat\"], c = colors[j], s=40 \\\r\n , marker=markers[j], transform=ccrs.Geodetic(), label=labels[j])\r\n \r\n\r\n if i == 1:\r\n ax.set_extent([-180, 180, -60, -90], crs=ccrs.PlateCarree())\r\n else:\r\n ax.set_extent([-180, 180, 60, 90], crs=ccrs.PlateCarree()) \r\n plt.plot()\r\n plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))\r\n plt.title(title)\r\n \r\n#dates to be considered \r\n\"\"\"\r\ndates = [\"2003-05-23\", \"2003-06-05\", \"2003-06-09\", \"2003-06-11\", \"2003-06-12\", \"2003-06-15\", \"2008-05-28\" \\\r\n , \"2008-05-29\", \"2008-05-30\", \"2008-05-31\", \"2008-06-01\", \"2008-06-02\", \"2007-01-25\", \"2011-01-07\" \\\r\n , \"2007-07-08\", \"2008-07-25\", \"2008-08-29\"]\r\n\"\"\"\r\n\"\"\"\r\ndates = [\"2003-05-23\", \"2003-06-05\", \"2003-06-09\", \"2003-06-11\", \"2003-06-12\", \"2003-06-15\", \"2008-05-28\" \\\r\n , \"2008-05-29\", \"2008-05-30\", \"2008-05-31\", \"2008-06-01\", \"2008-06-02\", \"2007-01-25\", \"2011-01-07\" \\\r\n , \"2007-07-08\", \"2008-07-25\", \"2008-08-29\"]\r\n\"\"\"\r\ndates = [\"2009-05-23\", \"2009-06-14\", \"2009-06-24\", \"2009-07-24\", \"2009-08-26\"]\r\n#Minumum and maximum tangent height\r\n#classifier type\r\n#classifier_type = \"labels_svm_kpca_red\"\r\n#classifier_type = \"labels_svm_lk_pc_rf_scale_v1\"\r\nclassifier_type = \"labels_bc\"\r\nif classifier_type == \"labels_svm_lk_pc_rf_scale_v1\":\r\n cl_type = \"SVM (RF + PCA)\"\r\nif classifier_type == \"labels_svm_kpca\":\r\n cl_type = \"SVM (KPCA)\" \r\nif classifier_type == \"labels_svm_auto\":\r\n cl_type = \"SVM (autoenc)\" \r\nif classifier_type == \"labels_bc\":\r\n cl_type = \"Bayesian cl.\"\r\n#cycle on dates\r\n\r\n#df = (df_may_2003[(df_may_2003['htang'] > htang_min) & (df_may_2003['htang'] < htang_max)]).loc[dates[0]]\r\n\r\nfor date in dates:\r\n year = date.split(\"-\")[0]\r\n month = date.split(\"-\")[1]\r\n day = date.split(\"-\")[2]\r\n c_date = year + month + day\r\n df = pd.read_hdf( '../data/mipas_pd/' + year + '_' + month + '_prova.h5','df_reduced', where=['index == c_date'])\r\n bins = ([(14, 16), (16, 
18), (18, 22), (21.2, 26.8)])\r\n for k in range(0, len(bins)):\r\n df_binned = df[(df[\"htang\"] > bins[k][0]) & (df[\"htang\"] < bins[k][1])]\r\n if df_binned.shape[0] > 0:\r\n if df_binned[\"lat\"].mean() < 0:\r\n i = 1\r\n else: i = 0\r\n title = \"PSC plot date: \" + date + \" Altitude range: \" + str(bins[k][0]) + \"-\" + str(bins[k][1]) + \" [km]\" + \"\\n using \" + cl_type \r\n plot_psc(df_binned, i, title, classifier_type)\r\n my_path = \"../progetti/test_plots_specific_days/new/\" + cl_type\r\n if not os.path.exists(my_path):\r\n os.makedirs(my_path)\r\n my_file = date + \"_v2\" + str(k) +\".png\"\r\n plt.savefig(os.path.join(my_path, my_file))\r\n plt.close()\r\n \r\n\r\n\r\n#plt.scatter(df[df[\"labels_bc\"] == 2][\"lat\"] , df[df[\"labels_bc\"] == 2][\"lon\"], marker = markers[2], s=20, color='r', transform=ccrs.Geodetic())",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Feb 25 14:52:43 2019\r\n\r\n@author: rocco\r\n\"\"\"\r\nimport pandas as pd\r\nimport h5py\r\nimport numpy as np\r\nimport os\r\nimport matplotlib.pyplot as plt\r\n#load cdsdb\r\nnew_file = h5py.File(\"../data/csdb_bt/csdb_bt.h5\", \"r\")\r\nbt_csdb = new_file[\"bt\"][:]\r\nnew_file.close()\r\n#compute mean and var csdb dataset\r\ndf_bt_csdb = pd.DataFrame(bt_csdb)\r\nmean_bt_csdb = df_bt_csdb.mean()\r\nvar_bt_csdb = df_bt_csdb.var()\r\n\r\ndirectory = \"../data/mipas_blabeled_2009/2009_bt\"\r\nfiles = [i for i in os.listdir(directory)]\r\nbt_mipas_tot = np.empty([0, 142])\r\nfor file in files:\r\n #load mipas df\r\n bt_mipas = np.empty([0, 142])\r\n new_file = h5py.File(os.path.join(directory, file), \"r\")\r\n bt_mipas = new_file[\"bt\"][:]\r\n new_file.close()\r\n bt_mipas_tot = np.vstack([bt_mipas_tot, bt_mipas])\r\ndf_bt_mipas = pd.DataFrame(bt_mipas_tot)\r\nmean_bt_mipas = df_bt_mipas.mean()\r\nvar_bt_mipas = df_bt_mipas.var()\r\n#plotting variance\r\nmy_path = \"../progetti/bt_mean_var/\"\r\nif not os.path.exists(my_path):\r\n os.makedirs(my_path)\r\nmy_file = \"var_bt\" + \".png\"\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111)\r\nax2 = ax1.twiny()\r\nax1.plot(np.arange(0,142), var_bt_mipas, label = \"mipas\")\r\nax1.plot(np.arange(0,142), var_bt_csdb, label = \"csdb\")\r\nloc = [84, 99, 107, 113, 125, 131, 137]\r\nnew_tick_locations = np.array(loc)\r\nax2.set_xlim(ax1.get_xlim())\r\nax2.set_xticks(new_tick_locations)\r\ntransition = [\"t1\", \"t2\", \"t3\", \"t4\", \"t5\", \"t6\", \"t7\"]\r\nax2.set_xticklabels(transition)\r\nax2.set_xlabel(\"transition\")\r\ntitle = ax2.set_title(\"Variance: BT\")\r\ntitle.set_y(1.1)\r\nfig.subplots_adjust(top=0.85)\r\nax1.legend()\r\nfor tr in loc:\r\n ax1.axvline(tr, linewidth = 1, color = 'k')\r\nax1.set_ylabel(\"var\")\r\nax1.set_xlabel(\"BT\")\r\nfig.savefig(os.path.join(my_path, my_file))\r\nplt.close()\r\n#plotting mean\r\nmy_file = \"mean_bt\" + \".png\"\r\nfig = plt.figure()\r\nax1 = fig.add_subplot(111)\r\nax2 = ax1.twiny()\r\nax1.plot(np.arange(0,142), mean_bt_mipas, label = \"mipas\")\r\nax1.plot(np.arange(0,142), mean_bt_csdb, label = \"csdb\")\r\nnew_tick_locations = np.array(loc)\r\nax2.set_xlim(ax1.get_xlim())\r\nax2.set_xticks(new_tick_locations)\r\nax2.set_xticklabels(transition)\r\nax2.set_xlabel(\"transition\")\r\ntitle = ax2.set_title(\"Mean: BT\")\r\ntitle.set_y(1.1)\r\nfig.subplots_adjust(top=0.85)\r\nax1.legend() \r\nfor tr in loc:\r\n ax1.axvline(tr, linewidth = 1, color = 'k')\r\nax1.set_ylabel(\"mean\")\r\nax1.set_xlabel(\"BT\")\r\nfig.savefig(os.path.join(my_path, my_file))\r\nplt.close()\r\n\"\"\"\r\n#btd\r\nnew_file = h5py.File(\"../data/csdb_new/csdb_complete.h5\", \"r\")\r\nbtd_csdb = new_file[\"btd_complete\"][:]\r\nnew_file.close()\r\ndf_btd_csdb = pd.DataFrame(btd_csdb)\r\nmean_btd_csdb = df_btd_csdb.mean()\r\nvar_btd_csdb = df_btd_csdb.var()\r\ndirectory = \"../data/mipas_pd\"\r\nfiles = [i for i in os.listdir(directory)]\r\nfiles = files[19:24]\r\ndf_btd_mipas_complete = pd.DataFrame()\r\nfor file in files:\r\n #load mipas df\r\n df_btd_mipas = pd.read_hdf(os.path.join(directory, file),'df_btd')\r\n df_btd_mipas_complete = df_btd_mipas_complete.append(df_btd_mipas)\r\n\r\nmean_btd_mipas = df_btd_mipas.iloc[:, 0:10011].mean()\r\nvar_btd_mipas = df_btd_mipas.iloc[:, 0:10011].var()\r\nplt.plot(np.arange(0,10011), var_btd_mipas, label = \"mipas\")\r\nplt.plot(np.arange(0,10011), var_btd_csdb, label = \"csdb\")\r\n\"\"\""
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_hdf",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.close"
],
[
"numpy.arange",
"numpy.vstack",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.empty",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
AxelAllen/Multimodal-BERT-in-Medical-Image-and-Text-Classification | [
"b60bb7bd4fe07773ee3bf8edfc5011a337ac6037"
] | [
"MMBT/image.py"
] | [
"\"\"\"\nThis code is adapted from the image.py by Kiela et al. (2020) in https://github.com/facebookresearch/mmbt/blob/master/mmbt/models/image.py\nand the equivalent Huggingface implementation: utils_mmimdb.py, which can be\nfound here: https://github.com/huggingface/transformers/blob/8ea412a86faa8e9edeeb6b5c46b08def06aa03ea/examples/research_projects/mm-imdb/utils_mmimdb.py\n\nThe ImageEncoderDenseNet class is modified from the original ImageEncoder class to be based on pre-trained DenseNet\ninstead of ResNet and to be able to load saved pre-trained weights.\n\nThis class makes up the image submodule of the MMBT model.\n\nThe forward function is also modified according to the forward function of the DenseNet model listed here:\n\nOriginal forward function of DenseNet\n\ndef forward(self, x):\n features = self.features(x)\n out = F.relu(features, inplace=True)\n out = F.adaptive_avg_pool2d(out, (1, 1))\n out = torch.flatten(out, 1)\n out = self.classifier(out)\n return out\n\"\"\"\nimport os\nimport logging\nimport torch\nimport torch.nn as nn\nimport torchvision\nimport torch.nn.functional as F\n\n\nlogger = logging.getLogger(__name__)\n\n# mapping number of image embeddings to AdaptiveAvgPool2d output size\nPOOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}\n\n# module assumes that the directory where the saved chexnet weight is in the same level as this module\nMMBT_DIR_PARENT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(MMBT_DIR_PARENT, \"data\")\nMODELS_DIR = os.path.join(DATA_DIR, \"models\")\nSAVED_CHEXNET = os.path.join(MODELS_DIR, \"saved_chexnet.pt\")\n\n\nclass ImageEncoderDenseNet(nn.Module):\n def __init__(self, num_image_embeds, saved_model=True, path=os.path.join(MODELS_DIR, SAVED_CHEXNET)):\n \"\"\"\n\n :type num_image_embeds: int\n :param num_image_embeds: number of image embeddings to generate; 1-9 as they map to specific numbers of pooling\n output shape in the 'POOLING_BREAKDOWN'\n :param saved_model: True to load saved pre-trained model False to use torch pre-trained model\n :param path: path to the saved .pt model file\n \"\"\"\n super().__init__()\n if saved_model:\n # loading pre-trained weight, e.g. 
ChexNet\n # the model here expects the weight to be regular Tensors and NOT cuda Tensor\n model = torch.load(path)\n logger.info(f\"Saved model loaded from: {path}\")\n else:\n model = torchvision.models.densenet121(pretrained=True)\n\n # DenseNet architecture last layer is the classifier; we only want everything before that\n modules = list(model.children())[:-1]\n self.model = nn.Sequential(*modules)\n # self.model same as original DenseNet self.features part of the forward function\n self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[num_image_embeds])\n\n def forward(self, input_modal):\n \"\"\"\n B = batch\n N = number of image embeddings\n 1024 DenseNet embedding size, this can be changed when instantiating MMBTconfig for modal_hidden_size\n\n Bx3x224x224 (this is input shape) -> Bx1024x7x7 (this is shape after DenseNet CNN layers before the last layer)\n -> Bx1024xN (this is after torch.flatten step in this function below) -> BxNx1024 (this is the shape of the\n output tensor)\n\n :param input_modal: image tensor\n :return:\n \"\"\"\n # Bx3x224x224 -> Bx1024x7x7 -> Bx1024xN -> BxNx1024\n features = self.model(input_modal)\n out = F.relu(features, inplace=True)\n out = self.pool(out)\n out = torch.flatten(out, start_dim=2)\n out = out.transpose(1, 2).contiguous()\n\n return out # BxNx1024"
] | [
[
"torch.nn.Sequential",
"torch.load",
"torch.nn.functional.relu",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WONDER-project/GSAS-II-WONDER-OSX | [
"f90ab85f89f282d1b9686a1cbbf5adc5c48ceac9",
"f90ab85f89f282d1b9686a1cbbf5adc5c48ceac9",
"f90ab85f89f282d1b9686a1cbbf5adc5c48ceac9"
] | [
"GSAS-II-WONDER/SUBGROUPS.py",
"GSAS-II-WONDER/imports/G2img_1TIF.py",
"GSAS-II-WONDER/GSASIIlattice.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n*SUBGROUPS: Interface to special GSAS Bilbao SUBGROUPS & k-SUBGROUPSMAG web pages*\n-------------------------------\n\nExtraction of space subgroups for a given space group and a propagation vector\nfrom the GSAS version of SUBGROUPS & k-SUBGROUPSMAG web page on the Bilbao Crystallographic server\n\n\"\"\"\n########### SVN repository information ###################\n# $Date: 2018-07-10 11:41:00 -0500 (Tue, 10 Jul 2018) $\n# $Author: vondreele $\n# $Revision: 3465 $\n# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/kSUBGROUPSMAG.py $\n# $Id: kSUBGROUPSMAG.py 3465 2018-07-10 16:41:00Z vondreele $\n########### SVN repository information ###################\nfrom __future__ import division, print_function\nimport requests\nimport numpy as np\nimport numpy.linalg as nl\nimport GSASIIspc as G2spc\nimport GSASIIpath\nGSASIIpath.SetBinaryPath()\nsubmagSite = 'http://www.cryst.ehu.es/cgi-bin/cryst/programs/subgrmag1_general_GSAS.pl'\n\ndef GetNonStdSubgroups(SGData, kvec,star=False,landau=False,maximal=False):\n '''Run Bilboa's SUBGROUPS for a non-standard space group. \n This requires doing a post to the Bilboa site, which returns all\n subgroups of the entered space group as the text of a web page \n with a table containing the space group symbol, the \n transformation matrix and index for each subgroup.\n\n :params list kvec: propogation vector as a list of nine string fractions or blank\n :params SGData: space group object (see :ref:`Space Group object<SGData_table>`) \n\n :returns: (error,text) error: if True no error or False; where \n text containts a possible web page text\n '''\n print('''\n For use of SUBGROUPS, please cite:\n Symmetry-Based Computational Tools for Magnetic Crystallography,\n J.M. Perez-Mato, S.V. Gallego, E.S. Tasci, L. Elcoro, G. de la Flor, and M.I. Aroyo\n Annu. Rev. Mater. Res. 2015. 45,217-48.\n doi: 10.1146/annurev-matsci-070214-021008\n ''')\n \n \n def getSpGrp(item):\n return item.replace('<i>','').replace('</i>','').replace('<sub>','').replace('</sub>','')\n \n def getMatVec(item):\n return item.replace('{','[').replace('}',']')\n \n starmag = 'no'\n if star:\n starmag = 'yes'\n land = 'no'\n if landau:\n land = 'yes'\n celtodas = 'no'\n limite = 'spgroup'\n if maximal:\n limite = 'maximal'\n postdict = {'centrosymmetry':'0','crystalsystem':'0','landau':land,\n 'eleccion':'subgrmag1_k','inicio':'nostandard','celtodas':celtodas,\n 'limite':limite,'list':'Submit','listado':'lista','starmagnetica':starmag,\n 'pointgroup':'0','polarity':'0','sub':'1',\n 'super':'','tipog':'gesp','wyckoffstrain':''}\n text,table = G2spc.SGPrint(SGData)\n OpList = G2spc.TextOps(text,table,reverse=True)\n# GenList = G2spc.TextGen(SGData,reverse=True)\n for item in OpList:\n item += '\\n'\n sym = \"\"\n for i in OpList:\n if sym: sym += '\\n'\n #if sym: sym += ' ' # use this for testing to generate an error in place of previous\n sym += i.lower()\n postdict['generators'] = sym\n for j in [1,2,3]:\n if kvec[3*j-3] == ' ':\n break\n for i,k in zip(('x','y','z'),kvec[3*j-3:3*j]):\n postdict['knm%d%s'%(j,i)] = k\n try:\n r = requests.post(submagSite,postdict)\n except: #ConnectionError?\n page = ''\n print('connection error - not on internet')\n return None,None\n if r.status_code == 200:\n print('request OK')\n page = r.text\n page = page.replace('<font style= \"text-decoration: overline;\">','<font>-')\n else:\n page = ''\n print('request failed. 
Reason=',r.reason)\n return None,None\n r.close()\n \n result = page.replace('&','\\n')\n result = result.split('\\n')\n SPGPs = []\n MVs = []\n baseList = []\n itemList = []\n superList = []\n altList = []\n start = 0\n for line in result: #work around bug report from Bilbao\n start += 1\n if 'yesz' in line:\n break\n for line in result[start:]:\n if 'GGG' in line:\n lines = line.split('GGG')\n line = lines[0]\n alts = []\n beg = True\n for sline in lines:\n items = sline.split('z')\n gid = int(items[0])\n if beg:\n baseList.append(gid)\n beg = False\n alts.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n SPGPs.append(getSpGrp(items[4]))\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n altList.append(alts)\n for sline in lines[1:]:\n altList.append([])\n else:\n items = line.split('z')\n gid = int(items[0])\n altList.append([gid,])\n baseList.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n SPGPs.append(getSpGrp(items[4]))\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n result = list(zip(SPGPs,MVs,itemList,altList,superList))\n return result,baseList\n\ndef GetNonStdSubgroupsmag(SGData, kvec,star=False,landau=False,maximal=False):\n '''Run Bilboa's k-Subgroupsmag for a non-standard space group. \n This requires doing a post to the Bilboa site, which returns all\n magnetic subgroups of the entered subgroup as the text of a web page \n with a table containing the BNS magnetic space group symbol, the \n transformation matrix and index for each subgroup.\n\n :params list kvec: propogation vector as a list of three numbers\n :params SGData: space group object (see :ref:`Space Group object<SGData_table>`) \n\n :returns: (error,text) error: if True no error or False; where \n text containts a possible web page text\n '''\n print('''\n For use of k-SUBGROUPSMAG, please cite:\n Symmetry-Based Computational Tools for Magnetic Crystallography,\n J.M. Perez-Mato, S.V. Gallego, E.S. Tasci, L. Elcoro, G. de la Flor, and M.I. Aroyo\n Annu. Rev. Mater. Res. 2015. 
45,217-48.\n doi: 10.1146/annurev-matsci-070214-021008\n ''')\n\n def getSpGrp(item):\n return item.replace('<i>','').replace('</i>','').replace('<sub>','').replace('</sub>','')\n \n def getBNS(item):\n spgrp = getSpGrp(item)\n bns = ''\n sid = item.find('<sub>')\n if sid == 8:\n bns = spgrp[1]\n spgrp = '%s_%s %s'%(spgrp[0],bns,spgrp[2:])\n return spgrp,bns\n \n def getMatVec(item):\n return item.replace('{','[').replace('}',']')\n \n starmag = 'no'\n if star:\n starmag = 'yes'\n land = 'no'\n if landau:\n land = 'yes'\n celtodas = 'no'\n limite = 'spgroup'\n if maximal:\n limite = 'maximal'\n postdict = {'centrosymmetry':'0','crystalsystem':'0','landau':land,\n 'eleccion':'subgrmag1_k','inicio':'nostandard','celtodas':celtodas,\n 'limite':limite,'list':'Submit','listado':'lista','starmagnetica':starmag,\n 'pointgroup':'0','polarity':'0','sub':'1.1',\n 'super':'','tipog':'gmag','wyckoffstrain':''}\n text,table = G2spc.SGPrint(SGData)\n OpList = G2spc.TextOps(text,table,reverse=True)\n# OpList = G2spc.TextGen(SGData,reverse=True)\n for item in OpList:\n item += '\\n'\n sym = \"\"\n for i in OpList:\n if sym: sym += '\\n'\n #if sym: sym += ' ' # use this for testing to generate an error in place of previous\n sym += i.lower()\n postdict['generators'] = sym\n for j in [1,2,3]:\n if kvec[3*j-3] == ' ':\n break\n for i,k in zip(('x','y','z'),kvec[3*j-3:3*j]):\n postdict['km%d%s'%(j,i)] = k\n try:\n r = requests.post(submagSite,postdict)\n except: #ConnectionError?\n page = ''\n print('connection error - not on internet')\n return None,None\n if r.status_code == 200:\n print('request OK')\n page = r.text\n page = page.replace('<font style= \"text-decoration: overline;\">','<font>-')\n else:\n page = ''\n print('request failed. Reason=',r.reason)\n return None,None\n r.close()\n\n result = page.replace('&','\\n')\n result = result.split('\\n')\n start = 0\n for line in result: #work around bug report from Bilbao\n start += 1\n if 'yesz' in line:\n break\n SPGPs = []\n BNSs = []\n MVs = []\n baseList = []\n itemList = []\n superList = []\n altList = []\n for line in result[start:]:\n if 'GGG' in line:\n lines = line.split('GGG')\n alts = []\n beg = True\n for sline in lines:\n items = sline.split('z')\n gid = int(items[0])\n if beg:\n baseList.append(gid)\n beg = False\n alts.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n spgrp,bns = getBNS(items[4])\n SPGPs.append(spgrp)\n BNSs.append(bns)\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n altList.append(alts)\n for sline in lines[1:]:\n altList.append([])\n else:\n items = line.split('z')\n gid = int(items[0])\n altList.append([gid,])\n baseList.append(gid)\n itemList.append(gid)\n superList.append(getMatVec(items[7]))\n spgrp,bns = getBNS(items[4])\n SPGPs.append(spgrp)\n BNSs.append(bns)\n MVs.append([getMatVec(items[5]),getMatVec(items[6])])\n result = list(zip(SPGPs,BNSs,MVs,itemList,altList,superList))\n return result,baseList\n\ndef subBilbaoCheckLattice(spgNum,cell,tol=5):\n '''submit a unit cell to Bilbao PseudoLattice\n '''\n psSite = \"http://www.cryst.ehu.es/cgi-bin/cryst/programs/pseudosym/nph-pseudolattice\"\n cellstr = '+'.join(['{:.5f}'.format(i) for i in cell])\n datastr = \"sgr={:}&cell={:}&tol={:}&submit=Show\".format(\n str(int(spgNum)),cellstr,str(int(tol)))\n try:\n r = requests.post(psSite,data=datastr)\n except: #ConnectionError?\n page = ''\n print('connection error - not on internet')\n return None\n if r.status_code == 200:\n print('request OK')\n page = r.text\n page = 
page.replace('<font style= \"text-decoration: overline;\">','<font>-')\n else:\n page = ''\n print('request failed. Reason=',r.reason)\n return None\n r.close()\n return page\n\ndef parseBilbaoCheckLattice(page):\n '''find the cell options from the web page returned by Bilbao PseudoLattice\n '''\n cellopts = [i for i in page.split('<tr>') if '<td><pre>' in i]\n found = []\n for c in cellopts:\n cells = c.split(\"pre\")[1].split('<')[0].replace('>','').split('\\n') # list of cells, 1st is approx\n try:\n acell = [float(i) for i in cells[0].split()]\n xmatA = [c.split('[')[i].split(']')[0].split() for i in (1,2,3)]\n xmat = np.array([[eval(i) for i in j] for j in xmatA])\n cellmat = nl.inv(xmat).T\n except:\n print('Error processing cell in',c)\n continue\n found.append((acell,cellmat))\n return found\n\n\ndef test():\n SGData = G2spc.SpcGroup('f d -3 m')[1]\n \n print('test SUBGROUPSMAG') \n results,baseList = GetNonStdSubgroupsmag(SGData,('0','0','0',' ',' ',' ',' ',' ',' ',' '))\n if results:\n for [spgp,bns,mv,gid,altList,supList] in results:\n if gid in baseList:\n print('Space group: %d %s BNS: %s'%(gid,spgp,bns))\n print('MV',mv)\n print('altList:',altList)\n print('superList: ',supList)\n \n print('test SUBGROUPS')\n results,baseList = GetNonStdSubgroups(SGData,('1/3','1/3','1/2',' ',' ',' ',' ',' ',' ',' '))\n if results:\n for [spgp,mv,gid,altList,supList] in results:\n if gid in baseList:\n print('Space group: %d %s'%(gid,spgp))\n print('MV',mv)\n print('altList:',altList)\n print('superList: ',supList)\n \n \n\nif __name__ == '__main__':\n # run self-tests\n selftestquiet = False\n test()\n print (\"OK\")\n",
"# -*- coding: utf-8 -*-\n########### SVN repository information ###################\n# $Date: 2019-08-26 07:27:07 -0500 (Mon, 26 Aug 2019) $\n# $Author: vondreele $\n# $Revision: 4112 $\n# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/imports/G2img_1TIF.py $\n# $Id: G2img_1TIF.py 4112 2019-08-26 12:27:07Z vondreele $\n########### SVN repository information ###################\n'''\n*Module G2img_1TIF: Tagged-image File images*\n--------------------------------------------------\n\nRoutine to read an image in Tagged-image file (TIF) format as well as a variety\nof slightly incorrect pseudo-TIF formats used at instruments around the world.\nNote that the name ``G2img_1TIF`` is used so that this file will\nsort to the top of the image formats and thus show up first in the menu.\n(It is the most common, alas).\n\n'''\n\nfrom __future__ import division, print_function\nimport struct as st\nimport GSASIIobj as G2obj\nimport GSASIIpath\nimport GSASIIfiles as G2fil\nimport numpy as np\nimport time\nDEBUG = False\nGSASIIpath.SetVersionNumber(\"$Revision: 4112 $\")\nclass TIF_ReaderClass(G2obj.ImportImage):\n '''Reads TIF files using a routine (:func:`GetTifData`) that looks\n for files that can be identified from known instruments and will\n correct for slightly incorrect TIF usage. If that routine fails,\n it will be read with a standard TIF reader, which can handle compression\n and other things less commonly used at beamlines. \n '''\n def __init__(self):\n super(self.__class__,self).__init__( # fancy way to self-reference\n extensionlist=('.tif','.tiff'),\n strictExtension=False,\n formatName = 'TIF image',\n longFormatName = 'Various .tif and pseudo-TIF formats'\n )\n self.scriptable = True\n\n def ContentsValidator(self, filename):\n '''Does the header match the required TIF header?\n '''\n fp = open(filename,'rb')\n tag = fp.read(2)\n if 'bytes' in str(type(tag)):\n tag = tag.decode('latin-1')\n if tag == 'II' and int(st.unpack('<h',fp.read(2))[0]) == 42: #little endian\n pass\n elif tag == 'MM' and int(st.unpack('>h',fp.read(2))[0]) == 42: #big endian\n pass\n else:\n return False # header not found; not valid TIF\n fp.close()\n fp.close()\n return True\n \n def Reader(self,filename, ParentFrame=None, **unused):\n '''Read the TIF file using :func:`GetTifData`. If that fails,\n use :func:`scipy.misc.imread` and give the user a chance to\n edit the likely wrong default image parameters. \n '''\n self.Comments,self.Data,self.Npix,self.Image = GetTifData(filename)\n if self.Npix == 0:\n G2fil.G2Print(\"GetTifData failed to read \"+str(filename)+\" Trying SciPy\")\n import scipy.misc\n self.Image = scipy.misc.imread(filename,flatten=True)\n # for scipy 1.2 & later scipy.misc.imread will be removed\n # with note to use imageio.imread instead \n # (N.B. scipy.misc.imread uses PIL/pillow perhaps better to just use pillow)\n self.Npix = self.Image.size\n if ParentFrame:\n self.SciPy = True\n self.Comments = ['no metadata']\n self.Data = {'wavelength': 0.1, 'pixelSize': [200., 200.], 'distance': 100.0}\n self.Data['size'] = list(self.Image.shape)\n self.Data['center'] = [int(i/2) for i in self.Image.shape]\n if self.Npix == 0:\n return False\n self.LoadImage(ParentFrame,filename)\n return True\n\ndef GetTifData(filename):\n '''Read an image in a pseudo-tif format,\n as produced by a wide variety of software, almost always\n incorrectly in some way. 
\n '''\n import struct as st\n import array as ar\n import ReadMarCCDFrame as rmf\n image = None\n File = open(filename,'rb')\n dataType = 5\n center = [None,None]\n wavelength = None\n distance = None\n polarization = None\n samplechangerpos = None\n try:\n Meta = open(filename+'.metadata','Ur')\n head = Meta.readlines()\n for line in head:\n line = line.strip()\n try:\n if '=' not in line: continue\n keyword = line.split('=')[0].strip()\n if 'dataType' == keyword:\n dataType = int(line.split('=')[1])\n elif 'wavelength' == keyword.lower():\n wavelength = float(line.split('=')[1])\n elif 'distance' == keyword.lower():\n distance = float(line.split('=')[1])\n elif 'polarization' == keyword.lower():\n polarization = float(line.split('=')[1])\n elif 'samplechangercoordinate' == keyword.lower():\n samplechangerpos = float(line.split('=')[1])\n except:\n G2fil.G2Print('error reading metadata: '+line)\n Meta.close()\n except IOError:\n G2fil.G2Print ('no metadata file found - will try to read file anyway')\n head = ['no metadata file found',]\n \n tag = File.read(2)\n if 'bytes' in str(type(tag)):\n tag = tag.decode('latin-1')\n byteOrd = '<'\n if tag == 'II' and int(st.unpack('<h',File.read(2))[0]) == 42: #little endian\n IFD = int(st.unpack(byteOrd+'i',File.read(4))[0])\n elif tag == 'MM' and int(st.unpack('>h',File.read(2))[0]) == 42: #big endian\n byteOrd = '>'\n IFD = int(st.unpack(byteOrd+'i',File.read(4))[0]) \n else:\n# print (tag)\n lines = ['not a detector tiff file',]\n return lines,0,0,0\n File.seek(IFD) #get number of directory entries\n NED = int(st.unpack(byteOrd+'h',File.read(2))[0])\n IFD = {}\n nSlice = 1\n if DEBUG: print('byteorder:',byteOrd)\n for ied in range(NED):\n Tag,Type = st.unpack(byteOrd+'Hh',File.read(4))\n nVal = st.unpack(byteOrd+'i',File.read(4))[0]\n if DEBUG: print ('Try:',Tag,Type,nVal)\n if Type == 1:\n Value = st.unpack(byteOrd+nVal*'b',File.read(nVal))\n elif Type == 2:\n Value = st.unpack(byteOrd+'i',File.read(4))\n elif Type == 3:\n Value = st.unpack(byteOrd+nVal*'h',File.read(nVal*2))\n st.unpack(byteOrd+nVal*'h',File.read(nVal*2))\n elif Type == 4:\n if Tag in [273,279]:\n nSlice = nVal\n nVal = 1\n Value = st.unpack(byteOrd+nVal*'i',File.read(nVal*4))\n elif Type == 5:\n Value = st.unpack(byteOrd+nVal*'i',File.read(nVal*4))\n elif Type == 11:\n Value = st.unpack(byteOrd+nVal*'f',File.read(nVal*4))\n IFD[Tag] = [Type,nVal,Value]\n if DEBUG: print (Tag,IFD[Tag])\n sizexy = [IFD[256][2][0],IFD[257][2][0]]\n [nx,ny] = sizexy\n Npix = nx*ny\n time0 = time.time()\n if 34710 in IFD:\n G2fil.G2Print ('Read MAR CCD tiff file: '+filename)\n marFrame = rmf.marFrame(File,byteOrd,IFD)\n image = np.flipud(np.array(np.asarray(marFrame.image),dtype=np.int32))\n tifType = marFrame.filetitle\n pixy = [marFrame.pixelsizeX/1000.0,marFrame.pixelsizeY/1000.0]\n head = marFrame.outputHead()\n# extract resonable wavelength from header\n wavelength = marFrame.sourceWavelength*1e-5\n wavelength = (marFrame.opticsWavelength > 0) and marFrame.opticsWavelength*1e-5 or wavelength\n wavelength = (wavelength <= 0) and None or wavelength\n# extract resonable distance from header\n distance = (marFrame.startXtalToDetector+marFrame.endXtalToDetector)*5e-4\n distance = (distance <= marFrame.startXtalToDetector*5e-4) and marFrame.xtalToDetector*1e-3 or distance\n distance = (distance <= 0) and None or distance\n# extract resonable center from header\n center = [marFrame.beamX*marFrame.pixelsizeX*1e-9,marFrame.beamY*marFrame.pixelsizeY*1e-9]\n center = (center[0] != 0 and center[1] != 0) 
and center or [None,None]\n#print head,tifType,pixy\n elif nSlice > 1: #CheMin multislice tif file!\n try:\n import Image as Im\n except ImportError:\n try:\n from PIL import Image as Im\n except ImportError:\n G2fil.G2Print (\"PIL/pillow Image module not present. This TIF cannot be read without this\")\n #raise Exception(\"PIL/pillow Image module not found\")\n lines = ['not a detector tiff file',]\n return lines,0,0,0\n tifType = 'CheMin'\n pixy = [40.,40.]\n image = np.flipud(np.array(Im.open(filename)))*10.\n distance = 18.0\n center = [pixy[0]*sizexy[0]/2000,0] #the CheMin beam stop is here\n wavelength = 1.78892\n elif 272 in IFD:\n ifd = IFD[272]\n File.seek(ifd[2][0])\n S = File.read(ifd[1])\n if b'PILATUS' in S:\n tifType = 'Pilatus'\n dataType = 0\n pixy = [172.,172.]\n File.seek(4096)\n G2fil.G2Print ('Read Pilatus tiff file: '+filename)\n image = np.array(np.frombuffer(File.read(4*Npix),dtype=np.int32),dtype=np.int32)\n else:\n if IFD[258][2][0] == 16:\n if sizexy == [3888,3072] or sizexy == [3072,3888]:\n tifType = 'Dexela'\n pixy = [74.8,74.8]\n G2fil.G2Print ('Read Dexela detector tiff file: '+filename)\n else:\n tifType = 'GE'\n pixy = [200.,200.]\n G2fil.G2Print ('Read GE-detector tiff file: '+filename)\n File.seek(8)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.uint16),dtype=np.int32)\n elif IFD[258][2][0] == 32:\n # includes CHESS & Pilatus files from Area Detector\n tifType = 'CHESS'\n pixy = [200.,200.]\n File.seek(8)\n G2fil.G2Print ('Read as 32-bit unsigned (CHESS) tiff file: '+filename)\n image = np.array(ar.array('I',File.read(4*Npix)),dtype=np.uint32)\n elif 270 in IFD:\n File.seek(IFD[270][2][0])\n S = File.read(IFD[273][2][0]-IFD[270][2][0])\n if b'ImageJ' in S:\n tifType = 'ImageJ'\n dataType = 0\n pixy = [200.,200.]*IFD[277][2][0]\n File.seek(IFD[273][2][0])\n G2fil.G2Print ('Read ImageJ tiff file: '+filename)\n if IFD[258][2][0] == 32:\n image = File.read(4*Npix)\n image = np.array(np.frombuffer(image,dtype=byteOrd+'i4'),dtype=np.int32)\n elif IFD[258][2][0] == 16:\n image = File.read(2*Npix)\n pixy = [109.92,109.92] #for LCLS ImageJ tif files\n image = np.array(np.frombuffer(image,dtype=byteOrd+'u2'),dtype=np.int32)\n else: #gain map from 11-ID-C?\n pixy = [200.,200.]\n tifType = 'Gain map'\n image = File.read(4*Npix)\n image = np.array(np.frombuffer(image,dtype=byteOrd+'f4')*1000,dtype=np.int32)\n \n elif 262 in IFD and IFD[262][2][0] > 4:\n tifType = 'DND'\n pixy = [158.,158.]\n File.seek(512)\n G2fil.G2Print ('Read DND SAX/WAX-detector tiff file: '+filename)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.uint16),dtype=np.int32)\n elif sizexy == [1536,1536]:\n tifType = 'APS Gold'\n pixy = [150.,150.]\n File.seek(64)\n G2fil.G2Print ('Read Gold tiff file:'+filename)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.uint16),dtype=np.int32)\n elif sizexy == [2048,2048] or sizexy == [1024,1024] or sizexy == [3072,3072]:\n if IFD[273][2][0] == 8:\n if IFD[258][2][0] == 32:\n tifType = 'PE'\n pixy = [200.,200.]\n File.seek(8)\n G2fil.G2Print ('Read APS PE-detector tiff file: '+filename)\n if dataType == 5:\n image = np.array(np.frombuffer(File.read(4*Npix),dtype=np.float32),dtype=np.int32) #fastest\n else:\n image = np.array(np.frombuffer(File.read(4*Npix),dtype=np.int32),dtype=np.int32)\n elif IFD[258][2][0] == 16: \n tifType = 'MedOptics D1'\n pixy = [46.9,46.9]\n File.seek(8)\n G2fil.G2Print ('Read MedOptics D1 tiff file: '+filename)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.uint16),dtype=np.int32)\n \n 
elif IFD[273][2][0] == 4096:\n if sizexy[0] == 3072:\n pixy = [73.,73.]\n tifType = 'MAR225' \n else:\n pixy = [158.,158.]\n tifType = 'MAR325' \n File.seek(4096)\n G2fil.G2Print ('Read MAR CCD tiff file: '+filename)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.uint16),dtype=np.int32)\n elif IFD[273][2][0] == 512:\n tifType = '11-ID-C'\n pixy = [200.,200.]\n File.seek(512)\n G2fil.G2Print ('Read 11-ID-C tiff file: '+filename)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.uint16),dtype=np.int32)\n \n elif sizexy == [4096,4096]:\n if IFD[273][2][0] == 8:\n if IFD[258][2][0] == 16:\n tifType = 'scanCCD'\n pixy = [9.,9.]\n File.seek(8)\n G2fil.G2Print ('Read APS scanCCD tiff file: '+filename)\n image = np.array(ar.array('H',File.read(2*Npix)),dtype=np.int32)\n elif IFD[258][2][0] == 32:\n tifType = 'PE4k'\n pixy = [100.,100.]\n File.seek(8)\n G2fil.G2Print ('Read PE 4Kx4K tiff file: '+filename)\n image = np.array(np.frombuffer(File.read(4*Npix),dtype=np.float32)/2.**4,dtype=np.int32)\n elif IFD[273][2][0] == 4096:\n tifType = 'Rayonix'\n pixy = [73.242,73.242]\n File.seek(4096)\n G2fil.G2Print ('Read Rayonix MX300HE tiff file: '+filename)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.uint16),dtype=np.int32)\n elif sizexy == [391,380]:\n pixy = [109.92,109.92]\n File.seek(8)\n image = np.array(np.frombuffer(File.read(2*Npix),dtype=np.int16),dtype=np.int32)\n elif sizexy == [380,391]:\n File.seek(110)\n pixy = [109.92,109.92]\n image = np.array(np.frombuffer(File.read(Npix),dtype=np.uint8),dtype=np.int32)\n elif sizexy == [825,830]:\n pixy = [109.92,109.92]\n File.seek(8)\n image = np.array(np.frombuffer(File.read(Npix),dtype=np.uint8),dtype=np.int32)\n elif sizexy == [1800,1800]:\n pixy = [109.92,109.92]\n File.seek(110)\n image = np.array(np.frombuffer(File.read(Npix),dtype=np.uint8),dtype=np.int32)\n elif sizexy == [2880,2880]:\n pixy = [150.,150.]\n File.seek(8)\n dt = np.dtype(np.float32)\n dt = dt.newbyteorder(byteOrd)\n image = np.array(np.frombuffer(File.read(Npix*4),dtype=dt),dtype=np.int32)\n# elif sizexy == [960,960]:\n# tiftype = 'PE-BE'\n# pixy = (200,200)\n# File.seek(8)\n# if not imageOnly:\n# print 'Read Gold tiff file:',filename\n# image = np.array(ar.array('H',File.read(2*Npix)),dtype=np.int32)\n \n if image is None:\n lines = ['not a known detector tiff file',]\n return lines,0,0,0\n \n if sizexy[1]*sizexy[0] != image.size: # test is resize is allowed\n lines = ['not a known detector tiff file',]\n return lines,0,0,0\n if GSASIIpath.GetConfigValue('debug'):\n G2fil.G2Print ('image read time: %.3f'%(time.time()-time0))\n image = np.reshape(image,(sizexy[1],sizexy[0]))\n center = (not center[0]) and [pixy[0]*sizexy[0]/2000,pixy[1]*sizexy[1]/2000] or center\n wavelength = (not wavelength) and 0.10 or wavelength\n distance = (not distance) and 100.0 or distance\n polarization = (not polarization) and 0.99 or polarization\n samplechangerpos = (not samplechangerpos) and 0.0 or samplechangerpos\n data = {'pixelSize':pixy,'wavelength':wavelength,'distance':distance,'center':center,'size':sizexy,\n 'setdist':distance,'PolaVal':[polarization,False],'samplechangerpos':samplechangerpos}\n File.close() \n return head,data,Npix,image\n",
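The byte-order test at the heart of ContentsValidator and GetTifData above is worth isolating: a TIF file opens with 'II' (little-endian) or 'MM' (big-endian) followed by the 16-bit value 42 in the matching byte order. Below is a minimal self-contained sketch of that check; the file name in the commented call is hypothetical.

import struct as st

def tifByteOrder(filename):
    '''Return '<' or '>' for a valid TIF header, else None.'''
    with open(filename,'rb') as fp:
        tag = fp.read(2)
        if 'bytes' in str(type(tag)):
            tag = tag.decode('latin-1')
        if tag == 'II' and st.unpack('<h',fp.read(2))[0] == 42:
            return '<'   # little endian
        elif tag == 'MM' and st.unpack('>h',fp.read(2))[0] == 42:
            return '>'   # big endian
    return None          # not a valid TIF header

#print(tifByteOrder('image.tif'))  # hypothetical file; '<' for most beamline images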
"# -*- coding: utf-8 -*-\n'''\n*GSASIIlattice: Unit cells*\n---------------------------\n\nPerform lattice-related computations\n\nNote that *G* is the reciprocal lattice tensor, and *g* is its inverse,\n:math:`G = g^{-1}`, where \n\n .. math::\n\n g = \\\\left( \\\\begin{matrix}\n a^2 & a b\\\\cos\\gamma & a c\\\\cos\\\\beta \\\\\\\\\n a b\\\\cos\\\\gamma & b^2 & b c \\cos\\\\alpha \\\\\\\\\n a c\\\\cos\\\\beta & b c \\\\cos\\\\alpha & c^2\n \\\\end{matrix}\\\\right)\n\nThe \"*A* tensor\" terms are defined as\n:math:`A = (\\\\begin{matrix} G_{11} & G_{22} & G_{33} & 2G_{12} & 2G_{13} & 2G_{23}\\\\end{matrix})` and *A* can be used in this fashion:\n:math:`d^* = \\sqrt {A_1 h^2 + A_2 k^2 + A_3 l^2 + A_4 hk + A_5 hl + A_6 kl}`, where\n*d* is the d-spacing, and :math:`d^*` is the reciprocal lattice spacing, \n:math:`Q = 2 \\\\pi d^* = 2 \\\\pi / d`\n'''\n########### SVN repository information ###################\n# $Date: 2019-04-11 15:59:48 -0500 (Thu, 11 Apr 2019) $\n# $Author: vondreele $\n# $Revision: 3888 $\n# $URL: https://subversion.xray.aps.anl.gov/pyGSAS/trunk/GSASIIlattice.py $\n# $Id: GSASIIlattice.py 3888 2019-04-11 20:59:48Z vondreele $\n########### SVN repository information ###################\nfrom __future__ import division, print_function\nimport math\nimport copy\nimport sys\nimport random as ran\nimport numpy as np\nimport numpy.linalg as nl\nimport GSASIIpath\nimport GSASIImath as G2mth\nimport GSASIIspc as G2spc\nimport GSASIIElem as G2elem\nGSASIIpath.SetVersionNumber(\"$Revision: 3888 $\")\n# trig functions in degrees\nsind = lambda x: np.sin(x*np.pi/180.)\nasind = lambda x: 180.*np.arcsin(x)/np.pi\ntand = lambda x: np.tan(x*np.pi/180.)\natand = lambda x: 180.*np.arctan(x)/np.pi\natan2d = lambda y,x: 180.*np.arctan2(y,x)/np.pi\ncosd = lambda x: np.cos(x*np.pi/180.)\nacosd = lambda x: 180.*np.arccos(x)/np.pi\nrdsq2d = lambda x,p: round(1.0/np.sqrt(x),p)\nrpd = np.pi/180.\nRSQ2PI = 1./np.sqrt(2.*np.pi)\nSQ2 = np.sqrt(2.)\nRSQPI = 1./np.sqrt(np.pi)\nR2pisq = 1./(2.*np.pi**2)\nnxs = np.newaxis\n\ndef sec2HMS(sec):\n \"\"\"Convert time in sec to H:M:S string\n \n :param sec: time in seconds\n :return: H:M:S string (to nearest 100th second)\n \n \"\"\"\n H = int(sec//3600)\n M = int(sec//60-H*60)\n S = sec-3600*H-60*M\n return '%d:%2d:%.2f'%(H,M,S)\n \ndef rotdMat(angle,axis=0):\n \"\"\"Prepare rotation matrix for angle in degrees about axis(=0,1,2)\n\n :param angle: angle in degrees\n :param axis: axis (0,1,2 = x,y,z) about which for the rotation\n :return: rotation matrix - 3x3 numpy array\n\n \"\"\"\n if axis == 2:\n return np.array([[cosd(angle),-sind(angle),0],[sind(angle),cosd(angle),0],[0,0,1]])\n elif axis == 1:\n return np.array([[cosd(angle),0,-sind(angle)],[0,1,0],[sind(angle),0,cosd(angle)]])\n else:\n return np.array([[1,0,0],[0,cosd(angle),-sind(angle)],[0,sind(angle),cosd(angle)]])\n \ndef rotdMat4(angle,axis=0):\n \"\"\"Prepare rotation matrix for angle in degrees about axis(=0,1,2) with scaling for OpenGL \n\n :param angle: angle in degrees\n :param axis: axis (0,1,2 = x,y,z) about which for the rotation\n :return: rotation matrix - 4x4 numpy array (last row/column for openGL scaling)\n\n \"\"\"\n Mat = rotdMat(angle,axis)\n return np.concatenate((np.concatenate((Mat,[[0],[0],[0]]),axis=1),[[0,0,0,1],]),axis=0)\n \ndef fillgmat(cell):\n \"\"\"Compute lattice metric tensor from unit cell constants\n\n :param cell: tuple with a,b,c,alpha, beta, gamma (degrees)\n :return: 3x3 numpy array\n\n \"\"\"\n a,b,c,alp,bet,gam = cell\n g = np.array([\n [a*a, 
a*b*cosd(gam), a*c*cosd(bet)],\n [a*b*cosd(gam), b*b, b*c*cosd(alp)],\n [a*c*cosd(bet) ,b*c*cosd(alp), c*c]])\n return g\n \ndef cell2Gmat(cell):\n \"\"\"Compute real and reciprocal lattice metric tensor from unit cell constants\n\n :param cell: tuple with a,b,c,alpha, beta, gamma (degrees)\n :return: reciprocal (G) & real (g) metric tensors (list of two numpy 3x3 arrays)\n\n \"\"\"\n g = fillgmat(cell)\n G = nl.inv(g) \n return G,g\n\ndef A2Gmat(A,inverse=True):\n \"\"\"Fill real & reciprocal metric tensor (G) from A.\n\n :param A: reciprocal metric tensor elements as [G11,G22,G33,2*G12,2*G13,2*G23]\n :param bool inverse: if True return both G and g; else just G\n :return: reciprocal (G) & real (g) metric tensors (list of two numpy 3x3 arrays)\n\n \"\"\"\n G = np.array([\n [A[0], A[3]/2., A[4]/2.], \n [A[3]/2.,A[1], A[5]/2.], \n [A[4]/2.,A[5]/2., A[2]]])\n if inverse:\n g = nl.inv(G)\n return G,g\n else:\n return G\n\ndef Gmat2A(G):\n \"\"\"Extract A from reciprocal metric tensor (G)\n\n :param G: reciprocal maetric tensor (3x3 numpy array\n :return: A = [G11,G22,G33,2*G12,2*G13,2*G23]\n\n \"\"\"\n return [G[0][0],G[1][1],G[2][2],2.*G[0][1],2.*G[0][2],2.*G[1][2]]\n \ndef cell2A(cell):\n \"\"\"Obtain A = [G11,G22,G33,2*G12,2*G13,2*G23] from lattice parameters\n\n :param cell: [a,b,c,alpha,beta,gamma] (degrees)\n :return: G reciprocal metric tensor as 3x3 numpy array\n\n \"\"\"\n G,g = cell2Gmat(cell)\n return Gmat2A(G)\n\ndef A2cell(A):\n \"\"\"Compute unit cell constants from A\n\n :param A: [G11,G22,G33,2*G12,2*G13,2*G23] G - reciprocal metric tensor\n :return: a,b,c,alpha, beta, gamma (degrees) - lattice parameters\n\n \"\"\"\n G,g = A2Gmat(A)\n return Gmat2cell(g)\n\ndef Gmat2cell(g):\n \"\"\"Compute real/reciprocal lattice parameters from real/reciprocal metric tensor (g/G)\n The math works the same either way.\n\n :param g (or G): real (or reciprocal) metric tensor 3x3 array\n :return: a,b,c,alpha, beta, gamma (degrees) (or a*,b*,c*,alpha*,beta*,gamma* degrees)\n\n \"\"\"\n oldset = np.seterr('raise')\n a = np.sqrt(max(0,g[0][0]))\n b = np.sqrt(max(0,g[1][1]))\n c = np.sqrt(max(0,g[2][2]))\n alp = acosd(g[2][1]/(b*c))\n bet = acosd(g[2][0]/(a*c))\n gam = acosd(g[0][1]/(a*b))\n np.seterr(**oldset)\n return a,b,c,alp,bet,gam\n\ndef invcell2Gmat(invcell):\n \"\"\"Compute real and reciprocal lattice metric tensor from reciprocal \n unit cell constants\n \n :param invcell: [a*,b*,c*,alpha*, beta*, gamma*] (degrees)\n :return: reciprocal (G) & real (g) metric tensors (list of two 3x3 arrays)\n\n \"\"\"\n G = fillgmat(invcell)\n g = nl.inv(G)\n return G,g\n\ndef cellDijFill(pfx,phfx,SGData,parmDict): \n '''Returns the filled-out reciprocal cell (A) terms \n from the parameter dictionaries corrected for Dij.\n\n :param str pfx: parameter prefix (\"n::\", where n is a phase number)\n :param dict SGdata: a symmetry object\n :param dict parmDict: a dictionary of parameters\n\n :returns: A,sigA where each is a list of six terms with the A terms \n '''\n if SGData['SGLaue'] in ['-1',]:\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A1']+parmDict[phfx+'D22'],\n parmDict[pfx+'A2']+parmDict[phfx+'D33'],\n parmDict[pfx+'A3']+parmDict[phfx+'D12'],parmDict[pfx+'A4']+parmDict[phfx+'D13'],\n parmDict[pfx+'A5']+parmDict[phfx+'D23']]\n elif SGData['SGLaue'] in ['2/m',]:\n if SGData['SGUniq'] == 'a':\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A1']+parmDict[phfx+'D22'],\n parmDict[pfx+'A2']+parmDict[phfx+'D33'],0,0,parmDict[pfx+'A5']+parmDict[phfx+'D23']]\n elif 
SGData['SGUniq'] == 'b':\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A1']+parmDict[phfx+'D22'],\n parmDict[pfx+'A2']+parmDict[phfx+'D33'],0,parmDict[pfx+'A4']+parmDict[phfx+'D13'],0]\n else:\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A1']+parmDict[phfx+'D22'],\n parmDict[pfx+'A2']+parmDict[phfx+'D33'],parmDict[pfx+'A3']+parmDict[phfx+'D12'],0,0]\n elif SGData['SGLaue'] in ['mmm',]:\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A1']+parmDict[phfx+'D22'],\n parmDict[pfx+'A2']+parmDict[phfx+'D33'],0,0,0]\n elif SGData['SGLaue'] in ['4/m','4/mmm']:\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A0']+parmDict[phfx+'D11'],\n parmDict[pfx+'A2']+parmDict[phfx+'D33'],0,0,0]\n elif SGData['SGLaue'] in ['6/m','6/mmm','3m1', '31m', '3']:\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A0']+parmDict[phfx+'D11'],\n parmDict[pfx+'A2']+parmDict[phfx+'D33'],parmDict[pfx+'A0']+parmDict[phfx+'D11'],0,0]\n elif SGData['SGLaue'] in ['3R', '3mR']:\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A0']+parmDict[phfx+'D11'],\n parmDict[pfx+'A0']+parmDict[phfx+'D11'],\n parmDict[pfx+'A3']+parmDict[phfx+'D23'],parmDict[pfx+'A3']+parmDict[phfx+'D23'],\n parmDict[pfx+'A3']+parmDict[phfx+'D23']]\n elif SGData['SGLaue'] in ['m3m','m3']:\n A = [parmDict[pfx+'A0']+parmDict[phfx+'D11'],parmDict[pfx+'A0']+parmDict[phfx+'D11'],\n parmDict[pfx+'A0']+parmDict[phfx+'D11'],0,0,0]\n return A\n \ndef prodMGMT(G,Mat):\n '''Transform metric tensor by matrix\n \n :param G: array metric tensor\n :param Mat: array transformation matrix\n :return: array new metric tensor\n \n '''\n return np.inner(np.inner(Mat,G),Mat) #right\n# return np.inner(Mat,np.inner(Mat,G)) #right\n# return np.inner(np.inner(G,Mat).T,Mat) #right\n# return np.inner(Mat,np.inner(G,Mat).T) #right\n \ndef TransformCell(cell,Trans):\n '''Transform lattice parameters by matrix\n \n :param cell: list a,b,c,alpha,beta,gamma,(volume)\n :param Trans: array transformation matrix\n :return: array transformed a,b,c,alpha,beta,gamma,volume\n \n '''\n newCell = np.zeros(7)\n g = cell2Gmat(cell)[1]\n newg = prodMGMT(g,Trans)\n newCell[:6] = Gmat2cell(newg)\n newCell[6] = calc_V(cell2A(newCell[:6]))\n return newCell\n \ndef TransformXYZ(XYZ,Trans,Vec):\n return np.inner(XYZ,Trans)+Vec\n \ndef TransformU6(U6,Trans):\n Uij = np.inner(Trans,np.inner(U6toUij(U6),Trans).T)/nl.det(Trans)\n return UijtoU6(Uij)\n\ndef ExpandCell(Atoms,atCodes,cx,Trans):\n Unit =[int(max(abs(np.array(unit)))-1) for unit in Trans.T]\n for i,unit in enumerate(Unit):\n if unit > 0:\n for j in range(unit):\n moreAtoms = copy.deepcopy(Atoms)\n moreCodes = []\n for atom,code in zip(moreAtoms,atCodes):\n atom[cx+i] += 1.\n if '+' in code:\n cell = list(eval(code.split('+')[1]))\n ops = code.split('+')[0]\n else:\n cell = [0,0,0]\n ops = code\n cell[i] += 1\n moreCodes.append('%s+%d,%d,%d'%(ops,cell[0],cell[1],cell[2])) \n Atoms += moreAtoms\n atCodes += moreCodes\n return Atoms,atCodes\n \ndef TransformPhase(oldPhase,newPhase,Trans,Uvec,Vvec,ifMag):\n '''Transform atoms from oldPhase to newPhase\n M' is inv(M)\n does X' = M(X-U)+V transformation for coordinates and U' = MUM/det(M)\n for anisotropic thermal parameters\n \n :param oldPhase: dict G2 phase info for old phase\n :param newPhase: dict G2 phase info for new phase; with new cell & space group\n atoms are from oldPhase & will be transformed\n :param Trans: lattice transformation matrix M\n :param Uvec: array parent coordinates transformation vector U\n :param Vvec: 
array child coordinate transformation vector V\n :param ifMag: bool True if convert to magnetic phase; \n if True all nonmagnetic atoms will be removed\n \n :return: newPhase dict modified G2 phase info\n :return: atCodes list atom transformation codes\n \n '''\n \n cx,ct,cs,cia = oldPhase['General']['AtomPtrs']\n cm = 0\n if oldPhase['General']['Type'] == 'magnetic':\n cm = cx+4\n oAmat,oBmat = cell2AB(oldPhase['General']['Cell'][1:7])\n nAmat,nBmat = cell2AB(newPhase['General']['Cell'][1:7])\n SGData = newPhase['General']['SGData']\n invTrans = nl.inv(Trans)\n newAtoms,atCodes = FillUnitCell(oldPhase)\n newAtoms,atCodes = ExpandCell(newAtoms,atCodes,cx,Trans)\n if ifMag:\n cia += 3\n cs += 3\n newPhase['General']['Type'] = 'magnetic'\n newPhase['General']['AtomPtrs'] = [cx,ct,cs,cia]\n magAtoms = []\n magatCodes = []\n Landeg = 2.0\n for iat,atom in enumerate(newAtoms):\n if len(G2elem.GetMFtable([atom[ct],],[Landeg,])):\n magAtoms.append(atom[:cx+4]+[0.,0.,0.]+atom[cx+4:])\n magatCodes.append(atCodes[iat])\n newAtoms = magAtoms\n atCodes = magatCodes\n newPhase['Draw Atoms'] = []\n for atom in newAtoms:\n atom[cx:cx+3] = TransformXYZ(atom[cx:cx+3]+Uvec,invTrans.T,Vvec)%1.\n if atom[cia] == 'A':\n atom[cia+2:cia+8] = TransformU6(atom[cia+2:cia+8],Trans)\n atom[cs:cs+2] = G2spc.SytSym(atom[cx:cx+3],SGData)[:2]\n atom[cia+8] = ran.randint(0,sys.maxsize)\n if cm:\n mag = np.sqrt(np.sum(np.array(atom[cm:cm+3])**2))\n if mag:\n mom = np.inner(np.array(atom[cm:cm+3]),oBmat)\n mom = np.inner(mom,invTrans)\n mom = np.inner(mom,nAmat)\n mom /= np.sqrt(np.sum(mom**2))\n atom[cm:cm+3] = mom*mag\n newPhase['Atoms'] = newAtoms\n newPhase['Atoms'],atCodes = GetUnique(newPhase,atCodes)\n newPhase['Drawing'] = []\n newPhase['ranId'] = ran.randint(0,sys.maxsize)\n return newPhase,atCodes\n \ndef FindNonstandard(controls,Phase):\n '''\n Find nonstandard setting of magnetic cell that aligns with parent nuclear cell\n \n :param controls: list unit cell indexing controls\n :param Phase: dict new magnetic phase data (NB:not G2 phase construction); modified here\n :return: None\n \n '''\n abc = np.eye(3)\n cba = np.rot90(np.eye(3))\n cba[1,1] *= -1 #makes c-ba\n Mats = {'abc':abc,'cab':np.roll(abc,2,1),'bca':np.roll(abc,1,1),\n 'acb':np.roll(cba,1,1),'bac':np.roll(cba,2,1),'cba':cba} #ok\n BNS = {'A':{'abc':'A','cab':'C','bca':'B','acb':'A','bac':'B','cba':'C'}, \n 'B':{'abc':'B','cab':'A','bca':'C','acb':'C','bac':'A','cba':'B'},\n 'C':{'abc':'C','cab':'B','bca':'A','acb':'B','bac':'C','cba':'A'},\n 'a':{'abc':'a','cab':'c','bca':'b','acb':'a','bac':'b','cba':'c'}, #Ok\n 'b':{'abc':'b','cab':'a','bca':'c','acb':'c','bac':'a','cba':'b'},\n 'c':{'abc':'c','cab':'b','bca':'a','acb':'b','bac':'c','cba':'a'},\n 'S':{'abc':'S','cab':'S','bca':'S','acb':'S','bac':'S','cba':'S'},\n 'I':{'abc':'I','cab':'I','bca':'I','acb':'I','bac':'I','cba':'I'},\n }\n Trans = Phase['Trans']\n Uvec = Phase['Uvec']\n SGData = Phase['SGData']\n MSG = SGData.get('MagSpGrp',SGData['SpGrp']).split(' ',1)\n MSG[0] += ' '\n bns = ''\n if '_' in MSG[0]:\n bns = MSG[0][2]\n spn = SGData.get('SGSpin',[])\n if 'ortho' in SGData['SGSys']:\n lattSym = G2spc.getlattSym(Trans)\n SpGrp = SGData['SpGrp']\n NTrans = np.inner(Mats[lattSym].T,Trans.T) #ok\n if len(spn): spn[1:4] = np.inner(np.abs(nl.inv(Mats[lattSym])),spn[1:4]) #ok\n SGsym = G2spc.getlattSym(nl.inv(Mats[lattSym]))\n \n if lattSym != 'abc':\n NSG = G2spc.altSettingOrtho[SpGrp][SGsym].replace(\"'\",'').split(' ')\n if ' '.join(NSG) in ['P 2 21 2',]:\n Uvec[1] += .25\n elif ' '.join(NSG) 
in ['P 21 2 2',]:\n Uvec[0] += .25\n elif ' '.join(NSG) in ['P 2 2 21',]:\n Uvec[2] += .25\n Bns = ''\n if bns:\n Bns = BNS[bns][lattSym]\n NSG[0] += '_'+Bns+' '\n elif len(spn):\n for ifld in [1,2,3]:\n if spn[ifld] < 0:\n NSG[ifld] += \"'\"\n Nresult = [''.join(NSG)+' ',Bns]\n return Nresult,Uvec,NTrans\n else:\n return None\n elif 'mono' in SGData['SGSys']: # and not 'P_A' in Phase['Name']: #skip the one that doesn't work\n newcell = TransformCell(controls[6:12],Trans)\n MatsA = np.array([[1.,0.,0.],[0.,1.,0.],[1.,0,1.]])\n MatsB = np.array([[1.,0.,0.],[0.,1.,0.],[-1.,0,1.]])\n if not 70. < newcell[4] < 110.:\n MSG[1] = MSG[1].replace('c','n')\n MSG[0] = MSG[0].replace('C_c','C_B').replace('P_A','P ')\n if '_' in MSG[0]:\n bns = MSG[0][2]\n if newcell[4] > 110.:\n if newcell[2] > newcell[0]:\n Mats = MatsA\n else:\n MSG[1] = MSG[1].replace('n','c')\n MSG[0] = MSG[0].replace('C ','I ')\n Mats = MatsA.T\n elif newcell[4] < 70.:\n if newcell[2] > newcell[0]:\n Mats = MatsB\n else:\n MSG[1] = MSG[1].replace('n','c')\n MSG[0] = MSG[0].replace('C ','I ')\n Mats = MatsB.T\n Nresult = [' '.join(MSG)+' ',bns]\n NTrans = np.inner(Mats,Trans.T)\n return Nresult,Uvec,NTrans\n return None\n\ndef makeBilbaoPhase(result,uvec,trans,ifMag=False):\n phase = {}\n phase['Name'] = result[0].strip()\n phase['Uvec'] = uvec\n phase['Trans'] = trans\n phase['Keep'] = False\n phase['Use'] = False\n phase['aType'] = ''\n SpGp = result[0].replace(\"'\",'')\n SpGrp = G2spc.StandardizeSpcName(SpGp)\n phase['SGData'] = G2spc.SpcGroup(SpGrp)[1]\n if ifMag:\n BNSlatt = phase['SGData']['SGLatt']\n if not result[1]:\n phase['SGData']['SGSpin'] = G2spc.GetSGSpin(phase['SGData'],result[0])\n phase['SGData']['GenSym'],phase['SGData']['GenFlg'],BNSsym = G2spc.GetGenSym(phase['SGData'])\n if result[1]:\n BNSlatt += '_'+result[1]\n if 'P_S' in BNSlatt: BNSlatt = 'P_c' #triclinic fix\n phase['SGData']['BNSlattsym'] = [BNSlatt,BNSsym[BNSlatt]]\n G2spc.ApplyBNSlatt(phase['SGData'],phase['SGData']['BNSlattsym'])\n phase['SGData']['SpnFlp'] = G2spc.GenMagOps(phase['SGData'])[1]\n phase['SGData']['MagSpGrp'] = G2spc.MagSGSym(phase['SGData'])\n return phase\n\ndef FillUnitCell(Phase):\n Atoms = copy.deepcopy(Phase['Atoms'])\n atomData = []\n atCodes = []\n SGData = Phase['General']['SGData']\n SpnFlp = SGData.get('SpnFlp',[])\n Amat,Bmat = cell2AB(Phase['General']['Cell'][1:7])\n cx,ct,cs,cia = Phase['General']['AtomPtrs']\n cm = 0\n if Phase['General']['Type'] == 'magnetic':\n cm = cx+4\n for iat,atom in enumerate(Atoms):\n XYZ = np.array(atom[cx:cx+3])\n xyz = XYZ%1.\n if atom[cia] == 'A':\n Uij = atom[cia+2:cia+8]\n result = G2spc.GenAtom(xyz,SGData,False,Uij,True)\n for item in result:\n if item[0][2] >= .95: item[0][2] -= 1.\n atom[cx:cx+3] = item[0]\n atom[cia+2:cia+8] = item[1]\n if cm:\n Opr = abs(item[2])%100\n M = SGData['SGOps'][Opr-1][0]\n opNum = G2spc.GetOpNum(item[2],SGData)\n mom = np.inner(np.array(atom[cm:cm+3]),Bmat)\n atom[cm:cm+3] = np.inner(np.inner(mom,M),Amat)*nl.det(M)*SpnFlp[opNum-1]\n atCodes.append('%d:%s'%(iat,str(item[2])))\n atomData.append(atom[:cia+9]) #not SS stuff\n else:\n result = G2spc.GenAtom(xyz,SGData,False,Move=True)\n for item in result:\n if item[0][2] >= .95: item[0][2] -= 1.\n atom[cx:cx+3] = item[0]\n if cm:\n Opr = abs(item[1])%100\n M = SGData['SGOps'][Opr-1][0]\n opNum = G2spc.GetOpNum(item[1],SGData)\n mom = np.inner(np.array(atom[cm:cm+3]),Bmat)\n atom[cm:cm+3] = np.inner(np.inner(mom,M),Amat)*nl.det(M)*SpnFlp[opNum-1]\n atCodes.append('%d:%s'%(iat,str(item[1])))\n 
atomData.append(atom[:cia+9]) #not SS stuff\n \n return atomData,atCodes\n \ndef GetUnique(Phase,atCodes):\n \n def noDuplicate(xyzA,XYZ):\n if True in [np.allclose(xyzA%1.,xyzB%1.,atol=0.0002) for xyzB in XYZ]:\n return False\n return True\n\n cx,ct = Phase['General']['AtomPtrs'][:2]\n SGData = Phase['General']['SGData']\n Atoms = Phase['Atoms']\n Ind = len(Atoms)\n newAtoms = []\n newAtCodes = []\n Indx = {}\n XYZ = {}\n for ind in range(Ind):\n XYZ[ind] = np.array(Atoms[ind][cx:cx+3])%1.\n Indx[ind] = True\n for ind in range(Ind):\n if Indx[ind]:\n xyz = XYZ[ind]\n for jnd in range(Ind):\n if Atoms[ind][ct-1] == Atoms[jnd][ct-1]:\n if ind != jnd and Indx[jnd]: \n Equiv = G2spc.GenAtom(XYZ[jnd],SGData,Move=True)\n xyzs = np.array([equiv[0] for equiv in Equiv])\n Indx[jnd] = noDuplicate(xyz,xyzs)\n Ind = []\n for ind in Indx:\n if Indx[ind]:\n newAtoms.append(Atoms[ind])\n newAtCodes.append(atCodes[ind])\n return newAtoms,newAtCodes\n \ndef calc_rVsq(A):\n \"\"\"Compute the square of the reciprocal lattice volume (1/V**2) from A'\n\n \"\"\"\n G,g = A2Gmat(A)\n rVsq = nl.det(G)\n if rVsq < 0:\n return 1\n return rVsq\n \ndef calc_rV(A):\n \"\"\"Compute the reciprocal lattice volume (V*) from A\n \"\"\"\n return np.sqrt(calc_rVsq(A))\n \ndef calc_V(A):\n \"\"\"Compute the real lattice volume (V) from A\n \"\"\"\n return 1./calc_rV(A)\n\ndef A2invcell(A):\n \"\"\"Compute reciprocal unit cell constants from A\n returns tuple with a*,b*,c*,alpha*, beta*, gamma* (degrees)\n \"\"\"\n G,g = A2Gmat(A)\n return Gmat2cell(G)\n \ndef Gmat2AB(G):\n \"\"\"Computes orthogonalization matrix from reciprocal metric tensor G\n\n :returns: tuple of two 3x3 numpy arrays (A,B)\n\n * A for crystal to Cartesian transformations (A*x = np.inner(A,x) = X)\n * B (= inverse of A) for Cartesian to crystal transformation (B*X = np.inner(B,X) = x)\n\n \"\"\"\n# cellstar = Gmat2cell(G)\n g = nl.inv(G)\n cell = Gmat2cell(g)\n# A = np.zeros(shape=(3,3))\n return cell2AB(cell)\n# # from Giacovazzo (Fundamentals 2nd Ed.) p.75\n# A[0][0] = cell[0] # a\n# A[0][1] = cell[1]*cosd(cell[5]) # b cos(gamma)\n# A[0][2] = cell[2]*cosd(cell[4]) # c cos(beta)\n# A[1][1] = cell[1]*sind(cell[5]) # b sin(gamma)\n# A[1][2] = -cell[2]*cosd(cellstar[3])*sind(cell[4]) # - c cos(alpha*) sin(beta)\n# A[2][2] = 1./cellstar[2] # 1/c*\n# B = nl.inv(A)\n# return A,B\n \ndef cell2AB(cell):\n \"\"\"Computes orthogonalization matrix from unit cell constants\n\n :param tuple cell: a,b,c, alpha, beta, gamma (degrees)\n :returns: tuple of two 3x3 numpy arrays (A,B)\n A for crystal to Cartesian transformations A*x = np.inner(A,x) = X \n B (= inverse of A) for Cartesian to crystal transformation B*X = np.inner(B,X) = x\n \"\"\"\n G,g = cell2Gmat(cell) \n cellstar = Gmat2cell(G)\n A = np.zeros(shape=(3,3))\n # from Giacovazzo (Fundamentals 2nd Ed.) 
p.75\n A[0][0] = cell[0] # a\n A[0][1] = cell[1]*cosd(cell[5]) # b cos(gamma)\n A[0][2] = cell[2]*cosd(cell[4]) # c cos(beta)\n A[1][1] = cell[1]*sind(cell[5]) # b sin(gamma)\n A[1][2] = -cell[2]*cosd(cellstar[3])*sind(cell[4]) # - c cos(alpha*) sin(beta)\n A[2][2] = 1./cellstar[2] # 1/c*\n B = nl.inv(A)\n return A,B\n \ndef HKL2SpAng(H,cell,SGData):\n \"\"\"Computes spherical coords for hkls; view along 001\n\n :param array H: arrays of hkl\n :param tuple cell: a,b,c, alpha, beta, gamma (degrees)\n :param dict SGData: space group dictionary\n :returns: arrays of r,phi,psi (radius,inclination,azimuth) about 001 \n \"\"\"\n A,B = cell2AB(cell)\n xH = np.inner(B.T,H)\n r = np.sqrt(np.sum(xH**2,axis=0))\n phi = acosd(xH[2]/r)\n psi = atan2d(xH[1],xH[0])\n phi = np.where(phi>90.,180.-phi,phi)\n# GSASIIpath.IPyBreak()\n return r,phi,psi\n \ndef U6toUij(U6):\n \"\"\"Fill matrix (Uij) from U6 = [U11,U22,U33,U12,U13,U23]\n NB: there is a non numpy version in GSASIIspc: U2Uij\n\n :param list U6: 6 terms of u11,u22,...\n :returns:\n Uij - numpy [3][3] array of uij\n \"\"\"\n U = np.array([\n [U6[0], U6[3], U6[4]], \n [U6[3], U6[1], U6[5]], \n [U6[4], U6[5], U6[2]]])\n return U\n\ndef UijtoU6(U):\n \"\"\"Fill vector [U11,U22,U33,U12,U13,U23] from Uij \n NB: there is a non numpy version in GSASIIspc: Uij2U\n \"\"\"\n U6 = np.array([U[0][0],U[1][1],U[2][2],U[0][1],U[0][2],U[1][2]])\n return U6\n\ndef betaij2Uij(betaij,G):\n \"\"\"\n Convert beta-ij to Uij tensors\n \n :param beta-ij - numpy array [beta-ij]\n :param G: reciprocal metric tensor\n :returns: Uij: numpy array [Uij]\n \"\"\"\n ast = np.sqrt(np.diag(G)) #a*, b*, c*\n Mast = np.multiply.outer(ast,ast) \n return R2pisq*UijtoU6(U6toUij(betaij)/Mast)\n \ndef Uij2betaij(Uij,G):\n \"\"\"\n Convert Uij to beta-ij tensors -- stub for eventual completion\n \n :param Uij: numpy array [Uij]\n :param G: reciprocal metric tensor\n :returns: beta-ij - numpy array [beta-ij]\n \"\"\"\n pass\n \ndef cell2GS(cell):\n ''' returns Uij to betaij conversion matrix'''\n G,g = cell2Gmat(cell)\n GS = G\n GS[0][1] = GS[1][0] = math.sqrt(GS[0][0]*GS[1][1])\n GS[0][2] = GS[2][0] = math.sqrt(GS[0][0]*GS[2][2])\n GS[1][2] = GS[2][1] = math.sqrt(GS[1][1]*GS[2][2])\n return GS \n \ndef Uij2Ueqv(Uij,GS,Amat):\n ''' returns 1/3 trace of diagonalized U matrix'''\n U = np.multiply(U6toUij(Uij),GS)\n U = np.inner(Amat,np.inner(U,Amat).T)\n E,R = nl.eigh(U)\n return np.sum(E)/3.\n \ndef CosAngle(U,V,G):\n \"\"\" calculate cos of angle between U & V in generalized coordinates \n defined by metric tensor G\n\n :param U: 3-vectors assume numpy arrays, can be multiple reflections as (N,3) array\n :param V: 3-vectors assume numpy arrays, only as (3) vector\n :param G: metric tensor for U & V defined space assume numpy array\n :returns:\n cos(phi)\n \"\"\"\n u = (U.T/np.sqrt(np.sum(np.inner(U,G)*U,axis=1))).T\n v = V/np.sqrt(np.inner(V,np.inner(G,V)))\n cosP = np.inner(u,np.inner(G,v))\n return cosP\n \ndef CosSinAngle(U,V,G):\n \"\"\" calculate sin & cos of angle between U & V in generalized coordinates \n defined by metric tensor G\n\n :param U: 3-vectors assume numpy arrays\n :param V: 3-vectors assume numpy arrays\n :param G: metric tensor for U & V defined space assume numpy array\n :returns:\n cos(phi) & sin(phi)\n \"\"\"\n u = U/np.sqrt(np.inner(U,np.inner(G,U)))\n v = V/np.sqrt(np.inner(V,np.inner(G,V)))\n cosP = np.inner(u,np.inner(G,v))\n sinP = np.sqrt(max(0.0,1.0-cosP**2))\n return cosP,sinP\n \ndef criticalEllipse(prob):\n \"\"\"\n Calculate critical values for probability 
ellipsoids from probability\n \"\"\"\n if not ( 0.01 <= prob < 1.0):\n return 1.54 \n coeff = np.array([6.44988E-09,4.16479E-07,1.11172E-05,1.58767E-04,0.00130554,\n 0.00604091,0.0114921,-0.040301,-0.6337203,1.311582])\n llpr = math.log(-math.log(prob))\n return np.polyval(coeff,llpr)\n \ndef CellBlock(nCells):\n \"\"\"\n Generate block of unit cells n*n*n on a side; [0,0,0] centered, n = 2*nCells+1\n currently only works for nCells = 0 or 1 (not >1)\n \"\"\"\n if nCells:\n N = 2*nCells+1\n N2 = N*N\n N3 = N*N*N\n cellArray = []\n A = np.array(range(N3))\n cellGen = np.array([A//N2-1,A//N%N-1,A%N-1]).T\n for cell in cellGen:\n cellArray.append(cell)\n return cellArray\n else:\n return [0,0,0]\n \ndef CellAbsorption(ElList,Volume):\n '''Compute unit cell absorption\n\n :param dict ElList: dictionary of element contents including mu and\n number of atoms be cell\n :param float Volume: unit cell volume\n :returns: mu-total/Volume\n '''\n muT = 0\n for El in ElList:\n muT += ElList[El]['mu']*ElList[El]['FormulaNo']\n return muT/Volume\n \n#Permutations and Combinations\n# Four routines: combinations,uniqueCombinations, selections & permutations\n#These taken from Python Cookbook, 2nd Edition. 19.15 p724-726\n# \ndef _combinators(_handle, items, n):\n \"\"\" factored-out common structure of all following combinators \"\"\"\n if n==0:\n yield [ ]\n return\n for i, item in enumerate(items):\n this_one = [ item ]\n for cc in _combinators(_handle, _handle(items, i), n-1):\n yield this_one + cc\ndef combinations(items, n):\n \"\"\" take n distinct items, order matters \"\"\"\n def skipIthItem(items, i):\n return items[:i] + items[i+1:]\n return _combinators(skipIthItem, items, n)\ndef uniqueCombinations(items, n):\n \"\"\" take n distinct items, order is irrelevant \"\"\"\n def afterIthItem(items, i):\n return items[i+1:]\n return _combinators(afterIthItem, items, n)\ndef selections(items, n):\n \"\"\" take n (not necessarily distinct) items, order matters \"\"\"\n def keepAllItems(items, i):\n return items\n return _combinators(keepAllItems, items, n)\ndef permutations(items):\n \"\"\" take all items, order matters \"\"\"\n return combinations(items, len(items))\n\n#reflection generation routines\n#for these: H = [h,k,l]; A is as used in calc_rDsq; G - inv metric tensor, g - metric tensor; \n# cell - a,b,c,alp,bet,gam in A & deg\n \ndef Pos2dsp(Inst,pos):\n ''' convert powder pattern position (2-theta or TOF, musec) to d-spacing\n '''\n if 'C' in Inst['Type'][0] or 'PKS' in Inst['Type'][0]:\n wave = G2mth.getWave(Inst)\n return wave/(2.0*sind((pos-Inst.get('Zero',[0,0])[1])/2.0))\n else: #'T'OF - ignore difB\n return TOF2dsp(Inst,pos)\n \ndef TOF2dsp(Inst,Pos):\n ''' convert powder pattern TOF, musec to d-spacing by successive approximation\n Pos can be numpy array\n '''\n def func(d,pos,Inst): \n return (pos-Inst['difA'][1]*d**2-Inst['Zero'][1]-Inst['difB'][1]/d)/Inst['difC'][1]\n dsp0 = np.ones_like(Pos)\n N = 0\n while True: #successive approximations\n dsp = func(dsp0,Pos,Inst)\n if np.allclose(dsp,dsp0,atol=0.000001):\n return dsp\n dsp0 = dsp\n N += 1\n if N > 10:\n return dsp\n \ndef Dsp2pos(Inst,dsp):\n ''' convert d-spacing to powder pattern position (2-theta or TOF, musec)\n '''\n if 'C' in Inst['Type'][0] or 'PKS' in Inst['Type'][0]:\n wave = G2mth.getWave(Inst)\n val = min(0.995,wave/(2.*dsp)) #set max at 168deg\n pos = 2.0*asind(val)+Inst.get('Zero',[0,0])[1] \n else: #'T'OF\n pos = Inst['difC'][1]*dsp+Inst['Zero'][1]+Inst['difA'][1]*dsp**2+Inst.get('difB',[0,0,False])[1]/dsp\n return 
pos\n \ndef getPeakPos(dataType,parmdict,dsp):\n ''' convert d-spacing to powder pattern position (2-theta or TOF, musec)\n '''\n if 'C' in dataType:\n pos = 2.0*asind(parmdict['Lam']/(2.*dsp))+parmdict['Zero']\n else: #'T'OF\n pos = parmdict['difC']*dsp+parmdict['difA']*dsp**2+parmdict['difB']/dsp+parmdict['Zero']\n return pos\n \ndef calc_rDsq(H,A):\n 'needs doc string'\n rdsq = H[0]*H[0]*A[0]+H[1]*H[1]*A[1]+H[2]*H[2]*A[2]+H[0]*H[1]*A[3]+H[0]*H[2]*A[4]+H[1]*H[2]*A[5]\n return rdsq\n \ndef calc_rDsq2(H,G):\n 'needs doc string'\n return np.inner(H,np.inner(G,H))\n \ndef calc_rDsqSS(H,A,vec):\n 'needs doc string'\n rdsq = calc_rDsq(H[:3]+(H[3]*vec).T,A)\n return rdsq\n \ndef calc_rDsqZ(H,A,Z,tth,lam):\n 'needs doc string'\n rdsq = calc_rDsq(H,A)+Z*sind(tth)*2.0*rpd/lam**2\n return rdsq\n \ndef calc_rDsqZSS(H,A,vec,Z,tth,lam):\n 'needs doc string'\n rdsq = calc_rDsq(H[:3]+(H[3][:,np.newaxis]*vec).T,A)+Z*sind(tth)*2.0*rpd/lam**2\n return rdsq\n \ndef calc_rDsqT(H,A,Z,tof,difC):\n 'needs doc string'\n rdsq = calc_rDsq(H,A)+Z/difC\n return rdsq\n \ndef calc_rDsqTSS(H,A,vec,Z,tof,difC):\n 'needs doc string'\n rdsq = calc_rDsq(H[:3]+(H[3][:,np.newaxis]*vec).T,A)+Z/difC\n return rdsq\n \ndef PlaneIntercepts(Amat,H,phase,stack):\n ''' find unit cell intercepts for a stack of hkl planes\n '''\n Steps = range(-1,2,2)\n if stack:\n Steps = range(-10,10,1)\n Stack = []\n Ux = np.array([[0,0],[1,0],[1,1],[0,1]])\n for step in Steps:\n HX = []\n for i in [0,1,2]:\n if H[i]:\n h,k,l = [(i+1)%3,(i+2)%3,(i+3)%3]\n for j in [0,1,2,3]:\n hx = [0,0,0]\n intcpt = ((phase)/360.+step-H[h]*Ux[j,0]-H[k]*Ux[j,1])/H[l]\n if 0. <= intcpt <= 1.: \n hx[h] = Ux[j,0]\n hx[k] = Ux[j,1]\n hx[l] = intcpt\n HX.append(hx)\n if len(HX)> 2:\n HX = np.array(HX)\n DX = np.inner(HX-HX[0],Amat)\n D = np.sqrt(np.sum(DX**2,axis=1))\n Dsort = np.argsort(D)\n HX = HX[Dsort]\n DX = DX[Dsort]\n D = D[Dsort]\n DX[1:,:] = DX[1:,:]/D[1:,nxs]\n A = 2.*np.ones(HX.shape[0])\n A[1:] = [np.dot(DX[1],dx) for dx in DX[1:]]\n HX = HX[np.argsort(A)]\n Stack.append(HX)\n return Stack\n \ndef MaxIndex(dmin,A):\n 'needs doc string'\n Hmax = [0,0,0]\n try:\n cell = A2cell(A)\n except:\n cell = [1.,1.,1.,90.,90.,90.]\n for i in range(3):\n Hmax[i] = int(round(cell[i]/dmin))\n return Hmax\n \ndef transposeHKLF(transMat,Super,refList):\n ''' Apply transformation matrix to hkl(m)\n param: transmat: 3x3 or 4x4 array\n param: Super: 0 or 1 for extra index\n param: refList list of h,k,l,....\n return: newRefs transformed list of h',k',l',,,\n return: badRefs list of noninteger h',k',l'...\n '''\n newRefs = np.copy(refList)\n badRefs = []\n for H in newRefs:\n newH = np.inner(transMat,H[:3+Super])\n H[:3+Super] = np.rint(newH)\n if not np.allclose(newH,H[:3+Super],atol=0.01):\n badRefs.append(newH)\n return newRefs,badRefs\n \ndef sortHKLd(HKLd,ifreverse,ifdup,ifSS=False):\n '''sort reflection list on d-spacing; can sort in either order\n\n :param HKLd: a list of [h,k,l,d,...];\n :param ifreverse: True for largest d first\n :param ifdup: True if duplicate d-spacings allowed\n :return: sorted reflection list\n '''\n T = []\n N = 3\n if ifSS:\n N = 4\n for i,H in enumerate(HKLd):\n if ifdup:\n T.append((H[N],i))\n else:\n T.append(H[N]) \n D = dict(zip(T,HKLd))\n T.sort()\n if ifreverse:\n T.reverse()\n X = []\n okey = ''\n for key in T: \n if key != okey: X.append(D[key]) #remove duplicate d-spacings\n okey = key\n return X\n \ndef SwapIndx(Axis,H):\n 'needs doc string'\n if Axis in [1,-1]:\n return H\n elif Axis in [2,-3]:\n return [H[1],H[2],H[0]]\n else:\n 
return [H[2],H[0],H[1]]\n \ndef Rh2Hx(Rh):\n 'needs doc string'\n Hx = [0,0,0]\n Hx[0] = Rh[0]-Rh[1]\n Hx[1] = Rh[1]-Rh[2]\n Hx[2] = np.sum(Rh)\n return Hx\n \ndef Hx2Rh(Hx):\n 'needs doc string'\n Rh = [0,0,0]\n itk = -Hx[0]+Hx[1]+Hx[2]\n if itk%3 != 0:\n return 0 #error - not rhombohedral reflection\n else:\n Rh[1] = itk//3\n Rh[0] = Rh[1]+Hx[0]\n Rh[2] = Rh[1]-Hx[1]\n if Rh[0] < 0:\n for i in range(3):\n Rh[i] = -Rh[i]\n return Rh\n \ndef CentCheck(Cent,H):\n 'needs doc string'\n h,k,l = H\n if Cent == 'A' and (k+l)%2:\n return False\n elif Cent == 'B' and (h+l)%2:\n return False\n elif Cent == 'C' and (h+k)%2:\n return False\n elif Cent == 'I' and (h+k+l)%2:\n return False\n elif Cent == 'F' and ((h+k)%2 or (h+l)%2 or (k+l)%2):\n return False\n elif Cent == 'R' and (-h+k+l)%3:\n return False\n else:\n return True\n \ndef GetBraviasNum(center,system):\n \"\"\"Determine the Bravais lattice number, as used in GenHBravais\n \n :param center: one of: 'P', 'C', 'I', 'F', 'R' (see SGLatt from GSASIIspc.SpcGroup)\n :param system: one of 'cubic', 'hexagonal', 'tetragonal', 'orthorhombic', 'trigonal' (for R)\n 'monoclinic', 'triclinic' (see SGSys from GSASIIspc.SpcGroup)\n :return: a number between 0 and 13 \n or throws a ValueError exception if the combination of center, system is not found (i.e. non-standard)\n\n \"\"\"\n if center.upper() == 'F' and system.lower() == 'cubic':\n return 0\n elif center.upper() == 'I' and system.lower() == 'cubic':\n return 1\n elif center.upper() == 'P' and system.lower() == 'cubic':\n return 2\n elif center.upper() == 'R' and system.lower() == 'trigonal':\n return 3\n elif center.upper() == 'P' and system.lower() == 'hexagonal':\n return 4\n elif center.upper() == 'I' and system.lower() == 'tetragonal':\n return 5\n elif center.upper() == 'P' and system.lower() == 'tetragonal':\n return 6\n elif center.upper() == 'F' and system.lower() == 'orthorhombic':\n return 7\n elif center.upper() == 'I' and system.lower() == 'orthorhombic':\n return 8\n elif center.upper() == 'A' and system.lower() == 'orthorhombic':\n return 9\n elif center.upper() == 'B' and system.lower() == 'orthorhombic':\n return 10\n elif center.upper() == 'C' and system.lower() == 'orthorhombic':\n return 11\n elif center.upper() == 'P' and system.lower() == 'orthorhombic':\n return 12\n elif center.upper() == 'C' and system.lower() == 'monoclinic':\n return 13\n elif center.upper() == 'P' and system.lower() == 'monoclinic':\n return 14\n elif center.upper() == 'P' and system.lower() == 'triclinic':\n return 15\n raise ValueError('non-standard Bravais lattice center=%s, cell=%s' % (center,system))\n\ndef GenHBravais(dmin,Bravais,A):\n \"\"\"Generate the positionally unique powder diffraction reflections\n \n :param dmin: minimum d-spacing in A\n :param Bravais: lattice type (see GetBraviasNum). 
Bravais is one of:\n \n * 0 F cubic\n * 1 I cubic\n * 2 P cubic\n * 3 R hexagonal (trigonal not rhombohedral)\n * 4 P hexagonal\n * 5 I tetragonal\n * 6 P tetragonal\n * 7 F orthorhombic\n * 8 I orthorhombic\n * 9 A orthorhombic\n * 10 B orthorhombic\n * 11 C orthorhombic\n * 12 P orthorhombic\n * 13 I monoclinic\n * 14 C monoclinic\n * 15 P monoclinic\n * 16 P triclinic\n \n :param A: reciprocal metric tensor elements as [G11,G22,G33,2*G12,2*G13,2*G23]\n :return: HKL unique d list of [h,k,l,d,-1] sorted with largest d first\n \n \"\"\"\n if Bravais in [9,]:\n Cent = 'A'\n elif Bravais in [10,]:\n Cent = 'B'\n elif Bravais in [11,14]:\n Cent = 'C'\n elif Bravais in [1,5,8,13]:\n Cent = 'I'\n elif Bravais in [0,7]:\n Cent = 'F'\n elif Bravais in [3]:\n Cent = 'R'\n else:\n Cent = 'P'\n Hmax = MaxIndex(dmin,A)\n dminsq = 1./(dmin**2)\n HKL = []\n if Bravais == 16: #triclinic\n for l in range(-Hmax[2],Hmax[2]+1):\n for k in range(-Hmax[1],Hmax[1]+1):\n hmin = 0\n if (k < 0): hmin = 1\n if (k ==0 and l < 0): hmin = 1\n for h in range(hmin,Hmax[0]+1):\n H=[h,k,l]\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,rdsq2d(rdsq,6),-1])\n elif Bravais in [13,14,15]: #monoclinic - b unique\n Hmax = SwapIndx(2,Hmax)\n for h in range(Hmax[0]+1):\n for k in range(-Hmax[1],Hmax[1]+1):\n lmin = 0\n if k < 0:lmin = 1\n for l in range(lmin,Hmax[2]+1):\n [h,k,l] = SwapIndx(-2,[h,k,l])\n H = []\n if CentCheck(Cent,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,rdsq2d(rdsq,6),-1])\n [h,k,l] = SwapIndx(2,[h,k,l])\n elif Bravais in [7,8,9,10,11,12]: #orthorhombic\n for h in range(Hmax[0]+1):\n for k in range(Hmax[1]+1):\n for l in range(Hmax[2]+1):\n H = []\n if CentCheck(Cent,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,rdsq2d(rdsq,6),-1])\n elif Bravais in [5,6]: #tetragonal\n for l in range(Hmax[2]+1):\n for k in range(Hmax[1]+1):\n for h in range(k,Hmax[0]+1):\n H = []\n if CentCheck(Cent,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,rdsq2d(rdsq,6),-1])\n elif Bravais in [3,4]:\n lmin = 0\n if Bravais == 3: lmin = -Hmax[2] #hexagonal/trigonal\n for l in range(lmin,Hmax[2]+1):\n for k in range(Hmax[1]+1):\n hmin = k\n if l < 0: hmin += 1\n for h in range(hmin,Hmax[0]+1):\n H = []\n if CentCheck(Cent,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,rdsq2d(rdsq,6),-1])\n\n else: #cubic\n for l in range(Hmax[2]+1):\n for k in range(l,Hmax[1]+1):\n for h in range(k,Hmax[0]+1):\n H = []\n if CentCheck(Cent,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,rdsq2d(rdsq,6),-1])\n return sortHKLd(HKL,True,False)\n \ndef getHKLmax(dmin,SGData,A):\n 'finds maximum allowed hkl for given A within dmin'\n SGLaue = SGData['SGLaue']\n if SGLaue in ['3R','3mR']: #Rhombohedral axes\n Hmax = [0,0,0]\n cell = A2cell(A)\n aHx = cell[0]*math.sqrt(2.0*(1.0-cosd(cell[3])))\n cHx = cell[0]*math.sqrt(3.0*(1.0+2.0*cosd(cell[3])))\n Hmax[0] = Hmax[1] = int(round(aHx/dmin))\n Hmax[2] = int(round(cHx/dmin))\n #print Hmax,aHx,cHx\n else: # all others\n Hmax = MaxIndex(dmin,A)\n return Hmax\n \ndef GenHLaue(dmin,SGData,A):\n \"\"\"Generate the crystallographically unique powder diffraction reflections\n for a lattice and Bravais type\n \n :param dmin: minimum d-spacing\n :param SGData: space group dictionary with at least\n \n * 'SGLaue': Laue group symbol: one of 
'-1','2/m','mmm','4/m','6/m','4/mmm','6/mmm', '3m1', '31m', '3', '3R', '3mR', 'm3', 'm3m'\n * 'SGLatt': lattice centering: one of 'P','A','B','C','I','F'\n * 'SGUniq': code for unique monoclinic axis one of 'a','b','c' (only if 'SGLaue' is '2/m') otherwise an empty string\n \n :param A: reciprocal metric tensor elements as [G11,G22,G33,2*G12,2*G13,2*G23]\n :return: HKL = list of [h,k,l,d] sorted with largest d first and is unique \n part of reciprocal space ignoring anomalous dispersion\n \n \"\"\"\n import math\n SGLaue = SGData['SGLaue']\n SGLatt = SGData['SGLatt']\n SGUniq = SGData['SGUniq']\n #finds maximum allowed hkl for given A within dmin\n Hmax = getHKLmax(dmin,SGData,A)\n \n dminsq = 1./(dmin**2)\n HKL = []\n if SGLaue == '-1': #triclinic\n for l in range(-Hmax[2],Hmax[2]+1):\n for k in range(-Hmax[1],Hmax[1]+1):\n hmin = 0\n if (k < 0) or (k ==0 and l < 0): hmin = 1\n for h in range(hmin,Hmax[0]+1):\n H = []\n if CentCheck(SGLatt,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,1./math.sqrt(rdsq)])\n elif SGLaue == '2/m': #monoclinic\n axisnum = 1 + ['a','b','c'].index(SGUniq)\n Hmax = SwapIndx(axisnum,Hmax)\n for h in range(Hmax[0]+1):\n for k in range(-Hmax[1],Hmax[1]+1):\n lmin = 0\n if k < 0:lmin = 1\n for l in range(lmin,Hmax[2]+1):\n [h,k,l] = SwapIndx(-axisnum,[h,k,l])\n H = []\n if CentCheck(SGLatt,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,1./math.sqrt(rdsq)])\n [h,k,l] = SwapIndx(axisnum,[h,k,l])\n elif SGLaue in ['mmm','4/m','6/m']: #orthorhombic\n for l in range(Hmax[2]+1):\n for h in range(Hmax[0]+1):\n kmin = 1\n if SGLaue == 'mmm' or h ==0: kmin = 0\n for k in range(kmin,Hmax[1]+1):\n H = []\n if CentCheck(SGLatt,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,1./math.sqrt(rdsq)])\n elif SGLaue in ['4/mmm','6/mmm']: #tetragonal & hexagonal\n for l in range(Hmax[2]+1):\n for h in range(Hmax[0]+1):\n for k in range(h+1):\n H = []\n if CentCheck(SGLatt,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,1./math.sqrt(rdsq)])\n elif SGLaue in ['3m1','31m','3','3R','3mR']: #trigonals\n for l in range(-Hmax[2],Hmax[2]+1):\n hmin = 0\n if l < 0: hmin = 1\n for h in range(hmin,Hmax[0]+1):\n if SGLaue in ['3R','3']:\n kmax = h\n kmin = -int((h-1.)/2.)\n else:\n kmin = 0\n kmax = h\n if SGLaue in ['3m1','3mR'] and l < 0: kmax = h-1\n if SGLaue == '31m' and l < 0: kmin = 1\n for k in range(kmin,kmax+1):\n H = []\n if CentCheck(SGLatt,[h,k,l]): H=[h,k,l]\n if SGLaue in ['3R','3mR']:\n H = Hx2Rh(H)\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([H[0],H[1],H[2],1./math.sqrt(rdsq)])\n else: #cubic\n for h in range(Hmax[0]+1):\n for k in range(h+1):\n lmin = 0\n lmax = k\n if SGLaue =='m3':\n lmax = h-1\n if h == k: lmax += 1\n for l in range(lmin,lmax+1):\n H = []\n if CentCheck(SGLatt,[h,k,l]): H=[h,k,l]\n if H:\n rdsq = calc_rDsq(H,A)\n if 0 < rdsq <= dminsq:\n HKL.append([h,k,l,1./math.sqrt(rdsq)])\n return sortHKLd(HKL,True,True)\n \ndef GenPfHKLs(nMax,SGData,A): \n \"\"\"Generate the unique pole figure reflections for a lattice and Bravais type. 
\n Min d-spacing=1.0A & no more than nMax returned\n \n :param nMax: maximum number of hkls returned\n :param SGData: space group dictionary with at least\n \n * 'SGLaue': Laue group symbol: one of '-1','2/m','mmm','4/m','6/m','4/mmm','6/mmm', '3m1', '31m', '3', '3R', '3mR', 'm3', 'm3m'\n * 'SGLatt': lattice centering: one of 'P','A','B','C','I','F'\n * 'SGUniq': code for unique monoclinic axis one of 'a','b','c' (only if 'SGLaue' is '2/m') otherwise an empty string\n \n :param A: reciprocal metric tensor elements as [G11,G22,G33,2*G12,2*G13,2*G23]\n :return: HKL = list of 'h k l' strings sorted with largest d first; no duplicate zones\n \n \"\"\"\n HKL = np.array(GenHLaue(1.0,SGData,A)).T[:3].T #strip d-spacings\n N = min(nMax,len(HKL))\n return ['%d %d %d'%(h[0],h[1],h[2]) for h in HKL[:N]] \n\ndef GenSSHLaue(dmin,SGData,SSGData,Vec,maxH,A):\n 'needs a doc string'\n ifMag = False\n if 'MagSpGrp' in SGData:\n ifMag = True\n HKLs = []\n vec = np.array(Vec)\n vstar = np.sqrt(calc_rDsq(vec,A)) #find extra needed for -n SS reflections\n dvec = 1./(maxH*vstar+1./dmin)\n HKL = GenHLaue(dvec,SGData,A) \n SSdH = [vec*h for h in range(-maxH,maxH+1)]\n SSdH = dict(zip(range(-maxH,maxH+1),SSdH))\n for h,k,l,d in HKL:\n ext = G2spc.GenHKLf([h,k,l],SGData)[0] #h,k,l must be integral values here\n if not ext and d >= dmin:\n HKLs.append([h,k,l,0,d])\n for dH in SSdH:\n if dH:\n DH = SSdH[dH]\n H = [h+DH[0],k+DH[1],l+DH[2]]\n d = 1./np.sqrt(calc_rDsq(H,A))\n if d >= dmin:\n HKLM = np.array([h,k,l,dH])\n if (G2spc.checkSSLaue([h,k,l,dH],SGData,SSGData) and G2spc.checkSSextc(HKLM,SSGData)) or ifMag:\n HKLs.append([h,k,l,dH,d]) \n return HKLs\n \ndef LaueUnique2(SGData,refList):\n ''' Impose Laue symmetry on hkl\n \n :param SGData: space group data from 'P '+Laue\n :param HKLF: np.array([[h,k,l,...]]) reflection set to be converted\n \n :return: HKLF new reflection array with imposed Laue symmetry\n '''\n for ref in refList:\n H = ref[:3]\n Uniq = G2spc.GenHKLf(H,SGData)[2]\n Uniq = G2mth.sortArray(G2mth.sortArray(G2mth.sortArray(Uniq,2),1),0)\n ref[:3] = Uniq[-1]\n return refList\n \ndef LaueUnique(Laue,HKLF):\n ''' Impose Laue symmetry on hkl\n \n :param str Laue: Laue symbol, as below\n \n centrosymmetric Laue groups::\n \n ['-1','2/m','112/m','2/m11','mmm','-42m','-4m2','4/mmm','-3',\n '-31m','-3m1','6/m','6/mmm','m3','m3m']\n \n noncentrosymmetric Laue groups::\n \n ['1','2','211','112','m','m11','11m','222','mm2','m2m','2mm',\n '4','-4','422','4mm','3','312','321','31m','3m1','6','-6',\n '622','6mm','-62m','-6m2','23','432','-43m']\n \n :param HKLF: np.array([[h,k,l,...]]) reflection set to be converted\n \n :returns: HKLF new reflection array with imposed Laue symmetry\n '''\n \n HKLFT = HKLF.T\n mat41 = np.array([[0,1,0],[-1,0,0],[0,0,1]]) #hkl -> k,-h,l\n mat43 = np.array([[0,-1,0],[1,0,0],[0,0,1]]) #hkl -> -k,h,l\n mat4bar = np.array([[0,-1,0],[1,0,0],[0,0,-1]]) #hkl -> k,-h,-l\n mat31 = np.array([[-1,-1,0],[1,0,0],[0,0,1]]) #hkl -> ihl = -h-k,h,l\n mat32 = np.array([[0,1,0],[-1,-1,0],[0,0,1]]) #hkl -> kil = k,-h-k,l\n matd3 = np.array([[0,1,0],[0,0,1],[1,0,0]]) #hkl -> k,l,h\n matd3q = np.array([[0,0,-1],[-1,0,0],[0,1,0]]) #hkl -> -l,-h,k\n matd3t = np.array([[0,0,-1],[1,0,0],[0,-1,0]]) #hkl -> -l,h,-k\n mat6 = np.array([[1,1,0],[-1,0,0],[0,0,1]]) #hkl -> h+k,-h,l really 65\n matdm = np.array([[0,1,0],[1,0,0],[0,0,1]]) #hkl -> k,h,l\n matdmp = np.array([[-1,-1,0],[0,1,0],[0,0,1]]) #hkl -> -h-k,k,l\n matkm = np.array([[-1,0,0],[1,1,0],[0,0,1]]) #hkl -> -h,h+k,l\n matd2 = 
np.array([[0,1,0],[1,0,0],[0,0,-1]]) #hkl -> k,h,-l\n matdm3 = np.array([[1,0,0],[0,0,1],[0,1,0]]) #hkl -> h,l,k\n mat2d43 = np.array([[0,1,0],[1,0,0],[0,0,1]]) #hkl -> k,-h,l\n matk2 = np.array([[-1,0,0],[1,1,0],[0,0,-1]]) #hkl -> -h,-i,-l\n #triclinic\n if Laue == '1': #ok\n pass\n elif Laue == '-1': #ok\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]<0),HKLFT[:3]*np.array([-1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([-1,-1,-1])[:,nxs],HKLFT[:3])\n #monoclinic\n #noncentrosymmetric - all ok\n elif Laue == '2': \n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([-1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == '1 1 2':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]<0),HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n elif Laue == '2 1 1': \n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[1]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,-1,-1])[:,nxs],HKLFT[:3])\n elif Laue == 'm':\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n elif Laue == 'm 1 1':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n elif Laue == '1 1 m':\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n #centrosymmetric - all ok\n elif Laue == '2/m 1 1': \n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]*HKLFT[0]==0)&(HKLFT[1]<0),HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n elif Laue == '2/m':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]*HKLFT[1]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == '1 1 2/m':\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[1]*HKLFT[2]==0)&(HKLFT[0]<0),HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n #orthorhombic\n #noncentrosymmetric - all OK\n elif Laue == '2 2 2':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[1]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == 'm m 2':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n elif Laue == '2 m m': \n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == 'm 2 m':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n #centrosymmetric - all ok\n elif Laue == 'm m m':\n HKLFT[:3] = 
np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n #tetragonal \n #noncentrosymmetric - all ok\n elif Laue == '4':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat43[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]>0),np.squeeze(np.inner(HKLF[:,:3],mat41[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '-4': \n HKLFT[:3] = np.where(HKLFT[0]<=0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[0]<=0,np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<=0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[1]<=0,np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == '4 2 2':\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat43[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[1]<HKLFT[0]),np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]==0,np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3]) #in lieu od 2-fold\n elif Laue == '4 m m':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat43[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '-4 2 m':\n HKLFT[:3] = np.where(HKLFT[0]<=0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[0]<=0,np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<=0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[1]<=0,np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == '-4 m 2':\n HKLFT[:3] = np.where(HKLFT[2]<0,np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<=0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[1]<=0),np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]<0),HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[1]==0),np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[0]>HKLFT[1]),np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n #centrosymmetric - all ok\n elif Laue == '4/m':\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n 
HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat43[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]>0),np.squeeze(np.inner(HKLF[:,:3],mat41[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '4/m m m':\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat43[nxs,:,:])).T,HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[1]<HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],mat41[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n #trigonal - all hex cell\n #noncentrosymmetric - all ok\n elif Laue == '3':\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],mat31[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '3 1 2':\n HKLFT[:3] = np.where(HKLFT[2]<0,np.squeeze(np.inner(HKLF[:,:3],matk2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],mat31[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],matk2[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '3 2 1':\n HKLFT[:3] = np.where(HKLFT[0]<=-2*HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<-2*HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>0)&(HKLFT[1]==HKLFT[0]),np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T\n HKLFT[:3] = np.where((HKLFT[0]!=0)&(HKLFT[2]>0)&(HKLFT[0]==-2*HKLFT[1]),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == '3 1 m':\n HKLFT[:3] = np.where(HKLFT[0]>=HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(2*HKLFT[1]<-HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]>-2*HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],matdmp[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T\n elif Laue == '3 m 1':\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[1]+HKLFT[0])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],matkm[nxs,:,:])).T,HKLFT[:3])\n #centrosymmetric\n elif Laue == '-3': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([-1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],mat31[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[0]<0),-np.squeeze(np.inner(HKLF[:,:3],mat31[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = 
np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],-mat31[nxs,:,:])).T,HKLFT[:3]) \n elif Laue == '-3 m 1': #ok\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[1]+HKLFT[0])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],matkm[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[1]<HKLFT[0]),np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '-3 1 m': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([-1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],mat31[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<=0,np.squeeze(np.inner(HKLF[:,:3],-mat31[nxs,:,:])).T,HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[1]<HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n #hexagonal\n #noncentrosymmetric\n elif Laue == '6': #ok\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]==0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '-6': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],mat31[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '6 2 2': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[0]>HKLFT[1]),np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '6 m m': #ok\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]==0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]>HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '-6 m 2': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,np.squeeze(np.inner(HKLF[:,:3],matk2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = 
np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],mat31[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],matk2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n elif Laue == '-6 2 m': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<=-2*HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<-2*HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>0)&(HKLFT[1]==HKLFT[0]),np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T\n HKLFT[:3] = np.where(HKLFT[2]<0,np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]>HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n #centrosymmetric\n elif Laue == '6/m': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]==0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '6/m m m': #ok\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]+HKLFT[1])<0,np.squeeze(np.inner(HKLF[:,:3],mat32[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,np.squeeze(np.inner(HKLF[:,:3],mat6[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]>HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],matdm.T[nxs,:,:])).T,HKLFT[:3])\n #cubic - all ok\n #noncentrosymmetric - \n elif Laue == '2 3': \n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[1]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]<0)&((HKLFT[0]>-HKLFT[2])|(HKLFT[1]>-HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3t[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]<0)&((HKLFT[0]>-HKLFT[2])|(HKLFT[1]>=-HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3t[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([-1,1,-1])[:,nxs],HKLFT[:3]) \n elif Laue == '4 3 2': \n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,-1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = 
np.where(HKLFT[1]<0,np.squeeze(np.inner(HKLF[:,:3],mat43[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]==0)&(HKLFT[1]<HKLFT[0]),np.squeeze(np.inner(HKLF[:,:3],matd2[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]==0,np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3]) #in lieu od 2-fold\n HKLFT[:3] = np.where((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2]),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2]),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]==0,np.squeeze(np.inner(HKLF[:,:3],mat2d43[nxs,:,:])).T,HKLFT[:3])\n elif Laue == '-4 3 m': \n HKLFT[:3] = np.where(HKLFT[0]<=0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[0]<=0,np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]<=0,HKLFT[:3]*np.array([-1,-1,1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where(HKLFT[1]<=0,np.squeeze(np.inner(HKLF[:,:3],mat4bar[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[1]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<HKLFT[0],np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]==0)&(HKLFT[2]<0),HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>=0)&(HKLFT[1]<HKLFT[0]),np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([-1,1,-1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where((HKLFT[0]<0)&(HKLFT[2]<-HKLFT[0])&(HKLFT[1]>HKLFT[2]),np.squeeze(np.inner(HKLF[:,:3],matd3q[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[0]<0)&(HKLFT[2]>=-HKLFT[0])&(HKLFT[1]>HKLFT[2]),np.squeeze(np.inner(HKLF[:,:3],matdm3[nxs,:,:])).T,HKLFT[:3])\n #centrosymmetric \n elif Laue == 'm 3':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n elif Laue == 'm 3 m':\n HKLFT[:3] = np.where(HKLFT[0]<0,HKLFT[:3]*np.array([-1,1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[1]<0,HKLFT[:3]*np.array([1,-1,1])[:,nxs],HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[2]<0,HKLFT[:3]*np.array([1,1,-1])[:,nxs],HKLFT[:3]) \n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where((HKLFT[2]>=0)&((HKLFT[0]>=HKLFT[2])|(HKLFT[1]>HKLFT[2])),np.squeeze(np.inner(HKLF[:,:3],matd3[nxs,:,:])).T,HKLFT[:3])\n HKLFT[:3] = np.where(HKLFT[0]>HKLFT[1],np.squeeze(np.inner(HKLF[:,:3],matdm[nxs,:,:])).T,HKLFT[:3])\n return HKLFT.T\n \n\n#Spherical harmonics routines\ndef OdfChk(SGLaue,L,M):\n 'needs doc string'\n if not L%2 and abs(M) <= L:\n if SGLaue == '0': #cylindrical symmetry\n if M == 0: return True\n elif SGLaue 
== '-1':\n return True\n elif SGLaue == '2/m':\n if not abs(M)%2: return True\n elif SGLaue == 'mmm':\n if not abs(M)%2 and M >= 0: return True\n elif SGLaue == '4/m':\n if not abs(M)%4: return True\n elif SGLaue == '4/mmm':\n if not abs(M)%4 and M >= 0: return True\n elif SGLaue in ['3R','3']:\n if not abs(M)%3: return True\n elif SGLaue in ['3mR','3m1','31m']:\n if not abs(M)%3 and M >= 0: return True\n elif SGLaue == '6/m':\n if not abs(M)%6: return True\n elif SGLaue == '6/mmm':\n if not abs(M)%6 and M >= 0: return True\n elif SGLaue == 'm3':\n if M > 0:\n if L%12 == 2:\n if M <= L//12: return True\n else:\n if M <= L//12+1: return True\n elif SGLaue == 'm3m':\n if M > 0:\n if L%12 == 2:\n if M <= L//12: return True\n else:\n if M <= L//12+1: return True\n return False\n \ndef GenSHCoeff(SGLaue,SamSym,L,IfLMN=True):\n 'needs doc string'\n coeffNames = []\n for iord in [2*i+2 for i in range(L//2)]:\n for m in [i-iord for i in range(2*iord+1)]:\n if OdfChk(SamSym,iord,m):\n for n in [i-iord for i in range(2*iord+1)]:\n if OdfChk(SGLaue,iord,n):\n if IfLMN:\n coeffNames.append('C(%d,%d,%d)'%(iord,m,n))\n else:\n coeffNames.append('C(%d,%d)'%(iord,n))\n return coeffNames\n \ndef CrsAng(H,cell,SGData):\n 'needs doc string'\n a,b,c,al,be,ga = cell\n SQ3 = 1.732050807569\n H1 = np.array([1,0,0])\n H2 = np.array([0,1,0])\n H3 = np.array([0,0,1])\n H4 = np.array([1,1,1])\n G,g = cell2Gmat(cell)\n Laue = SGData['SGLaue']\n Naxis = SGData['SGUniq']\n if len(H.shape) == 1:\n DH = np.inner(H,np.inner(G,H))\n else:\n DH = np.array([np.inner(h,np.inner(G,h)) for h in H])\n if Laue == '2/m':\n if Naxis == 'a':\n DR = np.inner(H1,np.inner(G,H1))\n DHR = np.inner(H,np.inner(G,H1))\n elif Naxis == 'b':\n DR = np.inner(H2,np.inner(G,H2))\n DHR = np.inner(H,np.inner(G,H2))\n else:\n DR = np.inner(H3,np.inner(G,H3))\n DHR = np.inner(H,np.inner(G,H3))\n elif Laue in ['R3','R3m']:\n DR = np.inner(H4,np.inner(G,H4))\n DHR = np.inner(H,np.inner(G,H4))\n else:\n DR = np.inner(H3,np.inner(G,H3))\n DHR = np.inner(H,np.inner(G,H3))\n DHR /= np.sqrt(DR*DH)\n phi = np.where(DHR <= 1.0,acosd(DHR),0.0)\n if Laue == '-1':\n BA = H.T[1]*a/(b-H.T[0]*cosd(ga))\n BB = H.T[0]*sind(ga)**2\n elif Laue == '2/m':\n if Naxis == 'a':\n BA = H.T[2]*b/(c-H.T[1]*cosd(al))\n BB = H.T[1]*sind(al)**2\n elif Naxis == 'b':\n BA = H.T[0]*c/(a-H.T[2]*cosd(be))\n BB = H.T[2]*sind(be)**2\n else:\n BA = H.T[1]*a/(b-H.T[0]*cosd(ga))\n BB = H.T[0]*sind(ga)**2\n elif Laue in ['mmm','4/m','4/mmm']:\n BA = H.T[1]*a\n BB = H.T[0]*b\n elif Laue in ['3R','3mR']:\n BA = H.T[0]+H.T[1]-2.0*H.T[2]\n BB = SQ3*(H.T[0]-H.T[1])\n elif Laue in ['m3','m3m']:\n BA = H.T[1]\n BB = H.T[0]\n else:\n BA = H.T[0]+2.0*H.T[1]\n BB = SQ3*H.T[0]\n beta = atan2d(BA,BB)\n return phi,beta\n \ndef SamAng(Tth,Gangls,Sangl,IFCoup):\n \"\"\"Compute sample orientation angles vs laboratory coord. 
system\n\n :param Tth: Signed theta \n :param Gangls: Sample goniometer angles phi,chi,omega,azmuth \n :param Sangl: Sample angle zeros om-0, chi-0, phi-0 \n :param IFCoup: True if omega & 2-theta coupled in CW scan\n :returns: \n psi,gam: Sample odf angles \n dPSdA,dGMdA: Angle zero derivatives\n \"\"\" \n \n if IFCoup:\n GSomeg = sind(Gangls[2]+Tth)\n GComeg = cosd(Gangls[2]+Tth)\n else:\n GSomeg = sind(Gangls[2])\n GComeg = cosd(Gangls[2])\n GSTth = sind(Tth)\n GCTth = cosd(Tth) \n GSazm = sind(Gangls[3])\n GCazm = cosd(Gangls[3])\n GSchi = sind(Gangls[1])\n GCchi = cosd(Gangls[1])\n GSphi = sind(Gangls[0]+Sangl[2])\n GCphi = cosd(Gangls[0]+Sangl[2])\n SSomeg = sind(Sangl[0])\n SComeg = cosd(Sangl[0])\n SSchi = sind(Sangl[1])\n SCchi = cosd(Sangl[1])\n AT = -GSTth*GComeg+GCTth*GCazm*GSomeg\n BT = GSTth*GSomeg+GCTth*GCazm*GComeg\n CT = -GCTth*GSazm*GSchi\n DT = -GCTth*GSazm*GCchi\n \n BC1 = -AT*GSphi+(CT+BT*GCchi)*GCphi\n BC2 = DT-BT*GSchi\n BC3 = AT*GCphi+(CT+BT*GCchi)*GSphi\n \n BC = BC1*SComeg*SCchi+BC2*SComeg*SSchi-BC3*SSomeg \n psi = acosd(BC)\n \n BD = 1.0-BC**2\n C = np.where(BD>1.e-6,rpd/np.sqrt(BD),0.)\n dPSdA = [-C*(-BC1*SSomeg*SCchi-BC2*SSomeg*SSchi-BC3*SComeg),\n -C*(-BC1*SComeg*SSchi+BC2*SComeg*SCchi),\n -C*(-BC1*SSomeg-BC3*SComeg*SCchi)]\n \n BA = -BC1*SSchi+BC2*SCchi\n BB = BC1*SSomeg*SCchi+BC2*SSomeg*SSchi+BC3*SComeg\n gam = atan2d(BB,BA)\n\n BD = (BA**2+BB**2)/rpd\n\n dBAdO = 0\n dBAdC = -BC1*SCchi-BC2*SSchi\n dBAdF = BC3*SSchi\n \n dBBdO = BC1*SComeg*SCchi+BC2*SComeg*SSchi-BC3*SSomeg\n dBBdC = -BC1*SSomeg*SSchi+BC2*SSomeg*SCchi\n dBBdF = BC1*SComeg-BC3*SSomeg*SCchi\n \n dGMdA = np.where(BD > 1.e-6,[(BA*dBBdO-BB*dBAdO)/BD,(BA*dBBdC-BB*dBAdC)/BD, \\\n (BA*dBBdF-BB*dBAdF)/BD],[np.zeros_like(BD),np.zeros_like(BD),np.zeros_like(BD)])\n \n return psi,gam,dPSdA,dGMdA\n\nBOH = {\n'L=2':[[],[],[]],\n'L=4':[[0.30469720,0.36418281],[],[]],\n'L=6':[[-0.14104740,0.52775103],[],[]],\n'L=8':[[0.28646862,0.21545346,0.32826995],[],[]],\n'L=10':[[-0.16413497,0.33078546,0.39371345],[],[]],\n'L=12':[[0.26141975,0.27266871,0.03277460,0.32589402],\n [0.09298802,-0.23773812,0.49446631,0.0],[]],\n'L=14':[[-0.17557309,0.25821932,0.27709173,0.33645360],[],[]],\n'L=16':[[0.24370673,0.29873515,0.06447688,0.00377,0.32574495],\n [0.12039646,-0.25330128,0.23950998,0.40962508,0.0],[]],\n'L=18':[[-0.16914245,0.17017340,0.34598142,0.07433932,0.32696037],\n [-0.06901768,0.16006562,-0.24743528,0.47110273,0.0],[]],\n'L=20':[[0.23067026,0.31151832,0.09287682,0.01089683,0.00037564,0.32573563],\n [0.13615420,-0.25048007,0.12882081,0.28642879,0.34620433,0.0],[]],\n'L=22':[[-0.16109560,0.10244188,0.36285175,0.13377513,0.01314399,0.32585583],\n [-0.09620055,0.20244115,-0.22389483,0.17928946,0.42017231,0.0],[]],\n'L=24':[[0.22050742,0.31770654,0.11661736,0.02049853,0.00150861,0.00003426,0.32573505],\n [0.13651722,-0.21386648,0.00522051,0.33939435,0.10837396,0.32914497,0.0],\n [0.05378596,-0.11945819,0.16272298,-0.26449730,0.44923956,0.0,0.0]],\n'L=26':[[-0.15435003,0.05261630,0.35524646,0.18578869,0.03259103,0.00186197,0.32574594],\n [-0.11306511,0.22072681,-0.18706142,0.05439948,0.28122966,0.35634355,0.0],[]],\n'L=28':[[0.21225019,0.32031716,0.13604702,0.03132468,0.00362703,0.00018294,0.00000294,0.32573501],\n [0.13219496,-0.17206256,-0.08742608,0.32671661,0.17973107,0.02567515,0.32619598,0.0],\n [0.07989184,-0.16735346,0.18839770,-0.20705337,0.12926808,0.42715602,0.0,0.0]],\n'L=30':[[-0.14878368,0.01524973,0.33628434,0.22632587,0.05790047,0.00609812,0.00022898,0.32573594],\n 
[-0.11721726,0.20915005,-0.11723436,-0.07815329,0.31318947,0.13655742,0.33241385,0.0],\n [-0.04297703,0.09317876,-0.11831248,0.17355132,-0.28164031,0.42719361,0.0,0.0]],\n'L=32':[[0.20533892,0.32087437,0.15187897,0.04249238,0.00670516,0.00054977,0.00002018,0.00000024,0.32573501],\n [0.12775091,-0.13523423,-0.14935701,0.28227378,0.23670434,0.05661270,0.00469819,0.32578978,0.0],\n [0.09703829,-0.19373733,0.18610682,-0.14407046,0.00220535,0.26897090,0.36633402,0.0,0.0]],\n'L=34':[[-0.14409234,-0.01343681,0.31248977,0.25557722,0.08571889,0.01351208,0.00095792,0.00002550,0.32573508],\n [-0.11527834,0.18472133,-0.04403280,-0.16908618,0.27227021,0.21086614,0.04041752,0.32688152,0.0],\n [-0.06773139,0.14120811,-0.15835721,0.18357456,-0.19364673,0.08377174,0.43116318,0.0,0.0]]\n}\n\nLnorm = lambda L: 4.*np.pi/(2.0*L+1.)\n\ndef GetKcl(L,N,SGLaue,phi,beta):\n 'needs doc string'\n import pytexture as ptx\n if SGLaue in ['m3','m3m']:\n if 'array' in str(type(phi)) and np.any(phi.shape):\n Kcl = np.zeros_like(phi)\n else:\n Kcl = 0.\n for j in range(0,L+1,4):\n im = j//4\n if 'array' in str(type(phi)) and np.any(phi.shape):\n pcrs = ptx.pyplmpsi(L,j,len(phi),phi)[0]\n else:\n pcrs = ptx.pyplmpsi(L,j,1,phi)[0]\n Kcl += BOH['L=%d'%(L)][N-1][im]*pcrs*cosd(j*beta) \n else:\n if 'array' in str(type(phi)) and np.any(phi.shape):\n pcrs = ptx.pyplmpsi(L,N,len(phi),phi)[0]\n else:\n pcrs = ptx.pyplmpsi(L,N,1,phi)[0]\n pcrs *= RSQ2PI\n if N:\n pcrs *= SQ2\n if SGLaue in ['mmm','4/mmm','6/mmm','R3mR','3m1','31m']:\n if SGLaue in ['3mR','3m1','31m']: \n if N%6 == 3:\n Kcl = pcrs*sind(N*beta)\n else:\n Kcl = pcrs*cosd(N*beta)\n else:\n Kcl = pcrs*cosd(N*beta)\n else:\n Kcl = pcrs*(cosd(N*beta)+sind(N*beta))\n return Kcl\n \ndef GetKsl(L,M,SamSym,psi,gam):\n 'needs doc string'\n import pytexture as ptx\n if 'array' in str(type(psi)) and np.any(psi.shape):\n psrs,dpdps = ptx.pyplmpsi(L,M,len(psi),psi)\n else:\n psrs,dpdps = ptx.pyplmpsi(L,M,1,psi)\n psrs *= RSQ2PI\n dpdps *= RSQ2PI\n if M:\n psrs *= SQ2\n dpdps *= SQ2\n if SamSym in ['mmm',]:\n dum = cosd(M*gam)\n Ksl = psrs*dum\n dKsdp = dpdps*dum\n dKsdg = -psrs*M*sind(M*gam)\n else:\n dum = cosd(M*gam)+sind(M*gam)\n Ksl = psrs*dum\n dKsdp = dpdps*dum\n dKsdg = psrs*M*(-sind(M*gam)+cosd(M*gam))\n return Ksl,dKsdp,dKsdg \n \ndef GetKclKsl(L,N,SGLaue,psi,phi,beta):\n \"\"\"\n This is used for spherical harmonics description of preferred orientation;\n cylindrical symmetry only (M=0) and no sample angle derivatives returned\n \"\"\"\n import pytexture as ptx\n Ksl,x = ptx.pyplmpsi(L,0,1,psi)\n Ksl *= RSQ2PI\n if SGLaue in ['m3','m3m']:\n Kcl = 0.0\n for j in range(0,L+1,4):\n im = j//4\n pcrs,dum = ptx.pyplmpsi(L,j,1,phi)\n Kcl += BOH['L=%d'%(L)][N-1][im]*pcrs*cosd(j*beta) \n else:\n pcrs,dum = ptx.pyplmpsi(L,N,1,phi)\n pcrs *= RSQ2PI\n if N:\n pcrs *= SQ2\n if SGLaue in ['mmm','4/mmm','6/mmm','R3mR','3m1','31m']:\n if SGLaue in ['3mR','3m1','31m']: \n if N%6 == 3:\n Kcl = pcrs*sind(N*beta)\n else:\n Kcl = pcrs*cosd(N*beta)\n else:\n Kcl = pcrs*cosd(N*beta)\n else:\n Kcl = pcrs*(cosd(N*beta)+sind(N*beta))\n return Kcl*Ksl,Lnorm(L)\n \ndef Glnh(Start,SHCoef,psi,gam,SamSym):\n 'needs doc string'\n import pytexture as ptx\n\n if Start:\n ptx.pyqlmninit()\n Start = False\n Fln = np.zeros(len(SHCoef))\n for i,term in enumerate(SHCoef):\n l,m,n = eval(term.strip('C'))\n pcrs,dum = ptx.pyplmpsi(l,m,1,psi)\n pcrs *= RSQPI\n if m == 0:\n pcrs /= SQ2\n if SamSym in ['mmm',]:\n Ksl = pcrs*cosd(m*gam)\n else:\n Ksl = pcrs*(cosd(m*gam)+sind(m*gam))\n Fln[i] = 
SHCoef[term]*Ksl*Lnorm(l)\n ODFln = dict(zip(SHCoef.keys(),list(zip(SHCoef.values(),Fln))))\n return ODFln\n\ndef Flnh(Start,SHCoef,phi,beta,SGData):\n 'needs doc string'\n import pytexture as ptx\n \n if Start:\n ptx.pyqlmninit()\n Start = False\n Fln = np.zeros(len(SHCoef))\n for i,term in enumerate(SHCoef):\n l,m,n = eval(term.strip('C'))\n if SGData['SGLaue'] in ['m3','m3m']:\n Kcl = 0.0\n for j in range(0,l+1,4):\n im = j//4\n pcrs,dum = ptx.pyplmpsi(l,j,1,phi)\n Kcl += BOH['L='+str(l)][n-1][im]*pcrs*cosd(j*beta) \n else: #all but cubic\n pcrs,dum = ptx.pyplmpsi(l,n,1,phi)\n pcrs *= RSQPI\n if n == 0:\n pcrs /= SQ2\n if SGData['SGLaue'] in ['mmm','4/mmm','6/mmm','R3mR','3m1','31m']:\n if SGData['SGLaue'] in ['3mR','3m1','31m']: \n if n%6 == 3:\n Kcl = pcrs*sind(n*beta)\n else:\n Kcl = pcrs*cosd(n*beta)\n else:\n Kcl = pcrs*cosd(n*beta)\n else:\n Kcl = pcrs*(cosd(n*beta)+sind(n*beta))\n Fln[i] = SHCoef[term]*Kcl*Lnorm(l)\n ODFln = dict(zip(SHCoef.keys(),list(zip(SHCoef.values(),Fln))))\n return ODFln\n \ndef polfcal(ODFln,SamSym,psi,gam):\n '''Perform a pole figure computation.\n Note that the the number of gam values must either be 1 or must\n match psi. Updated for numpy 1.8.0\n '''\n import pytexture as ptx\n PolVal = np.ones_like(psi)\n for term in ODFln:\n if abs(ODFln[term][1]) > 1.e-3:\n l,m,n = eval(term.strip('C'))\n psrs,dum = ptx.pyplmpsi(l,m,len(psi),psi)\n if SamSym in ['-1','2/m']:\n if m:\n Ksl = RSQPI*psrs*(cosd(m*gam)+sind(m*gam))\n else:\n Ksl = RSQPI*psrs/SQ2\n else:\n if m:\n Ksl = RSQPI*psrs*cosd(m*gam)\n else:\n Ksl = RSQPI*psrs/SQ2\n PolVal += ODFln[term][1]*Ksl\n return PolVal\n \ndef invpolfcal(ODFln,SGData,phi,beta):\n 'needs doc string'\n import pytexture as ptx\n \n invPolVal = np.ones_like(beta)\n for term in ODFln:\n if abs(ODFln[term][1]) > 1.e-3:\n l,m,n = eval(term.strip('C'))\n if SGData['SGLaue'] in ['m3','m3m']:\n Kcl = 0.0\n for j in range(0,l+1,4):\n im = j//4\n pcrs,dum = ptx.pyplmpsi(l,j,len(beta),phi)\n Kcl += BOH['L=%d'%(l)][n-1][im]*pcrs*cosd(j*beta) \n else: #all but cubic\n pcrs,dum = ptx.pyplmpsi(l,n,len(beta),phi)\n pcrs *= RSQPI\n if n == 0:\n pcrs /= SQ2\n if SGData['SGLaue'] in ['mmm','4/mmm','6/mmm','R3mR','3m1','31m']:\n if SGData['SGLaue'] in ['3mR','3m1','31m']: \n if n%6 == 3:\n Kcl = pcrs*sind(n*beta)\n else:\n Kcl = pcrs*cosd(n*beta)\n else:\n Kcl = pcrs*cosd(n*beta)\n else:\n Kcl = pcrs*(cosd(n*beta)+sind(n*beta))\n invPolVal += ODFln[term][1]*Kcl \n return invPolVal\n \n \ndef textureIndex(SHCoef):\n 'needs doc string'\n Tindx = 1.0\n for term in SHCoef:\n l = eval(term.strip('C'))[0]\n Tindx += SHCoef[term]**2/(2.0*l+1.)\n return Tindx\n \n# self-test materials follow. 
\nselftestlist = []\n'''Defines a list of self-tests'''\nselftestquiet = True\ndef _ReportTest():\n 'Report name and doc string of current routine when ``selftestquiet`` is False'\n if not selftestquiet:\n import inspect\n caller = inspect.stack()[1][3]\n doc = eval(caller).__doc__\n if doc is not None:\n print('testing '+__file__+' with '+caller+' ('+doc+')')\n else:\n print('testing '+__file__()+\" with \"+caller)\nNeedTestData = True\ndef TestData():\n array = np.array\n global NeedTestData\n NeedTestData = False\n global CellTestData\n # output from uctbx computed on platform darwin on 2010-05-28\n CellTestData = [\n# cell, g, G, cell*, V, V*\n [(4, 4, 4, 90, 90, 90), \n array([[ 1.60000000e+01, 9.79717439e-16, 9.79717439e-16],\n [ 9.79717439e-16, 1.60000000e+01, 9.79717439e-16],\n [ 9.79717439e-16, 9.79717439e-16, 1.60000000e+01]]), array([[ 6.25000000e-02, 3.82702125e-18, 3.82702125e-18],\n [ 3.82702125e-18, 6.25000000e-02, 3.82702125e-18],\n [ 3.82702125e-18, 3.82702125e-18, 6.25000000e-02]]), (0.25, 0.25, 0.25, 90.0, 90.0, 90.0), 64.0, 0.015625],\n# cell, g, G, cell*, V, V*\n [(4.0999999999999996, 5.2000000000000002, 6.2999999999999998, 100, 80, 130), \n array([[ 16.81 , -13.70423184, 4.48533243],\n [-13.70423184, 27.04 , -5.6887143 ],\n [ 4.48533243, -5.6887143 , 39.69 ]]), array([[ 0.10206349, 0.05083339, -0.00424823],\n [ 0.05083339, 0.06344997, 0.00334956],\n [-0.00424823, 0.00334956, 0.02615544]]), (0.31947376387537696, 0.25189277536327803, 0.16172643497798223, 85.283666420376008, 94.716333579624006, 50.825714168082683), 100.98576357983838, 0.0099023858863968445],\n# cell, g, G, cell*, V, V*\n [(3.5, 3.5, 6, 90, 90, 120), \n array([[ 1.22500000e+01, -6.12500000e+00, 1.28587914e-15],\n [ -6.12500000e+00, 1.22500000e+01, 1.28587914e-15],\n [ 1.28587914e-15, 1.28587914e-15, 3.60000000e+01]]), array([[ 1.08843537e-01, 5.44217687e-02, 3.36690552e-18],\n [ 5.44217687e-02, 1.08843537e-01, 3.36690552e-18],\n [ 3.36690552e-18, 3.36690552e-18, 2.77777778e-02]]), (0.32991443953692895, 0.32991443953692895, 0.16666666666666669, 90.0, 90.0, 60.000000000000021), 63.652867178156257, 0.015710211406520427],\n ]\n global CoordTestData\n CoordTestData = [\n# cell, ((frac, ortho),...)\n ((4,4,4,90,90,90,), [\n ((0.10000000000000001, 0.0, 0.0),(0.40000000000000002, 0.0, 0.0)),\n ((0.0, 0.10000000000000001, 0.0),(2.4492935982947065e-17, 0.40000000000000002, 0.0)),\n ((0.0, 0.0, 0.10000000000000001),(2.4492935982947065e-17, -2.4492935982947065e-17, 0.40000000000000002)),\n ((0.10000000000000001, 0.20000000000000001, 0.29999999999999999),(0.40000000000000013, 0.79999999999999993, 1.2)),\n ((0.20000000000000001, 0.29999999999999999, 0.10000000000000001),(0.80000000000000016, 1.2, 0.40000000000000002)),\n ((0.29999999999999999, 0.20000000000000001, 0.10000000000000001),(1.2, 0.80000000000000004, 0.40000000000000002)),\n ((0.5, 0.5, 0.5),(2.0, 1.9999999999999998, 2.0)),\n]),\n# cell, ((frac, ortho),...)\n ((4.1,5.2,6.3,100,80,130,), [\n ((0.10000000000000001, 0.0, 0.0),(0.40999999999999998, 0.0, 0.0)),\n ((0.0, 0.10000000000000001, 0.0),(-0.33424955703700043, 0.39834311042186865, 0.0)),\n ((0.0, 0.0, 0.10000000000000001),(0.10939835193016617, -0.051013289294572106, 0.6183281045774256)),\n ((0.10000000000000001, 0.20000000000000001, 0.29999999999999999),(0.069695941716497567, 0.64364635296002093, 1.8549843137322766)),\n ((0.20000000000000001, 0.29999999999999999, 0.10000000000000001),(-0.073350319180835066, 1.1440160419710339, 0.6183281045774256)),\n ((0.29999999999999999, 0.20000000000000001, 
0.10000000000000001),(0.67089923785616512, 0.74567293154916525, 0.6183281045774256)),\n ((0.5, 0.5, 0.5),(0.92574397446582857, 1.7366491056364828, 3.0916405228871278)),\n]),\n# cell, ((frac, ortho),...)\n ((3.5,3.5,6,90,90,120,), [\n ((0.10000000000000001, 0.0, 0.0),(0.35000000000000003, 0.0, 0.0)),\n ((0.0, 0.10000000000000001, 0.0),(-0.17499999999999993, 0.3031088913245536, 0.0)),\n ((0.0, 0.0, 0.10000000000000001),(3.6739403974420595e-17, -3.6739403974420595e-17, 0.60000000000000009)),\n ((0.10000000000000001, 0.20000000000000001, 0.29999999999999999),(2.7675166561703527e-16, 0.60621778264910708, 1.7999999999999998)),\n ((0.20000000000000001, 0.29999999999999999, 0.10000000000000001),(0.17500000000000041, 0.90932667397366063, 0.60000000000000009)),\n ((0.29999999999999999, 0.20000000000000001, 0.10000000000000001),(0.70000000000000018, 0.6062177826491072, 0.60000000000000009)),\n ((0.5, 0.5, 0.5),(0.87500000000000067, 1.5155444566227676, 3.0)),\n]),\n]\n global LaueTestData #generated by GSAS\n LaueTestData = {\n 'R 3 m':[(4.,4.,6.,90.,90.,120.),((1,0,1,6),(1,0,-2,6),(0,0,3,2),(1,1,0,6),(2,0,-1,6),(2,0,2,6),\n (1,1,3,12),(1,0,4,6),(2,1,1,12),(2,1,-2,12),(3,0,0,6),(1,0,-5,6),(2,0,-4,6),(3,0,-3,6),(3,0,3,6),\n (0,0,6,2),(2,2,0,6),(2,1,4,12),(2,0,5,6),(3,1,-1,12),(3,1,2,12),(1,1,6,12),(2,2,3,12),(2,1,-5,12))],\n 'R 3':[(4.,4.,6.,90.,90.,120.),((1,0,1,6),(1,0,-2,6),(0,0,3,2),(1,1,0,6),(2,0,-1,6),(2,0,2,6),(1,1,3,6),\n (1,1,-3,6),(1,0,4,6),(3,-1,1,6),(2,1,1,6),(3,-1,-2,6),(2,1,-2,6),(3,0,0,6),(1,0,-5,6),(2,0,-4,6),\n (2,2,0,6),(3,0,3,6),(3,0,-3,6),(0,0,6,2),(3,-1,4,6),(2,0,5,6),(2,1,4,6),(4,-1,-1,6),(3,1,-1,6),\n (3,1,2,6),(4,-1,2,6),(2,2,-3,6),(1,1,-6,6),(1,1,6,6),(2,2,3,6),(2,1,-5,6),(3,-1,-5,6))],\n 'P 3':[(4.,4.,6.,90.,90.,120.),((0,0,1,2),(1,0,0,6),(1,0,1,6),(0,0,2,2),(1,0,-1,6),(1,0,2,6),(1,0,-2,6),\n (1,1,0,6),(0,0,3,2),(1,1,1,6),(1,1,-1,6),(1,0,3,6),(1,0,-3,6),(2,0,0,6),(2,0,-1,6),(1,1,-2,6),\n (1,1,2,6),(2,0,1,6),(2,0,-2,6),(2,0,2,6),(0,0,4,2),(1,1,-3,6),(1,1,3,6),(1,0,-4,6),(1,0,4,6),\n (2,0,-3,6),(2,1,0,6),(2,0,3,6),(3,-1,0,6),(2,1,1,6),(3,-1,-1,6),(2,1,-1,6),(3,-1,1,6),(1,1,4,6),\n (3,-1,2,6),(3,-1,-2,6),(1,1,-4,6),(0,0,5,2),(2,1,2,6),(2,1,-2,6),(3,0,0,6),(3,0,1,6),(2,0,4,6),\n (2,0,-4,6),(3,0,-1,6),(1,0,-5,6),(1,0,5,6),(3,-1,-3,6),(2,1,-3,6),(2,1,3,6),(3,-1,3,6),(3,0,-2,6),\n (3,0,2,6),(1,1,5,6),(1,1,-5,6),(2,2,0,6),(3,0,3,6),(3,0,-3,6),(0,0,6,2),(2,0,-5,6),(2,1,-4,6),\n (2,2,-1,6),(3,-1,-4,6),(2,2,1,6),(3,-1,4,6),(2,1,4,6),(2,0,5,6),(1,0,-6,6),(1,0,6,6),(4,-1,0,6),\n (3,1,0,6),(3,1,-1,6),(3,1,1,6),(4,-1,-1,6),(2,2,2,6),(4,-1,1,6),(2,2,-2,6),(3,1,2,6),(3,1,-2,6),\n (3,0,4,6),(3,0,-4,6),(4,-1,-2,6),(4,-1,2,6),(2,2,-3,6),(1,1,6,6),(1,1,-6,6),(2,2,3,6),(3,-1,5,6),\n (2,1,5,6),(2,1,-5,6),(3,-1,-5,6))],\n 'P 3 m 1':[(4.,4.,6.,90.,90.,120.),((0,0,1,2),(1,0,0,6),(1,0,-1,6),(1,0,1,6),(0,0,2,2),(1,0,-2,6),\n (1,0,2,6),(1,1,0,6),(0,0,3,2),(1,1,1,12),(1,0,-3,6),(1,0,3,6),(2,0,0,6),(1,1,2,12),(2,0,1,6),\n (2,0,-1,6),(0,0,4,2),(2,0,-2,6),(2,0,2,6),(1,1,3,12),(1,0,-4,6),(1,0,4,6),(2,0,3,6),(2,1,0,12),\n (2,0,-3,6),(2,1,1,12),(2,1,-1,12),(1,1,4,12),(2,1,2,12),(0,0,5,2),(2,1,-2,12),(3,0,0,6),(1,0,-5,6),\n (3,0,1,6),(3,0,-1,6),(1,0,5,6),(2,0,4,6),(2,0,-4,6),(2,1,3,12),(2,1,-3,12),(3,0,-2,6),(3,0,2,6),\n (1,1,5,12),(3,0,-3,6),(0,0,6,2),(2,2,0,6),(3,0,3,6),(2,1,4,12),(2,2,1,12),(2,0,5,6),(2,1,-4,12),\n (2,0,-5,6),(1,0,-6,6),(1,0,6,6),(3,1,0,12),(3,1,-1,12),(3,1,1,12),(2,2,2,12),(3,1,2,12),\n (3,0,4,6),(3,1,-2,12),(3,0,-4,6),(1,1,6,12),(2,2,3,12))],\n 'P 3 1 
m':[(4.,4.,6.,90.,90.,120.),((0,0,1,2),(1,0,0,6),(0,0,2,2),(1,0,1,12),(1,0,2,12),(1,1,0,6),\n (0,0,3,2),(1,1,-1,6),(1,1,1,6),(1,0,3,12),(2,0,0,6),(2,0,1,12),(1,1,2,6),(1,1,-2,6),(2,0,2,12),\n (0,0,4,2),(1,1,-3,6),(1,1,3,6),(1,0,4,12),(2,1,0,12),(2,0,3,12),(2,1,1,12),(2,1,-1,12),(1,1,-4,6),\n (1,1,4,6),(0,0,5,2),(2,1,-2,12),(2,1,2,12),(3,0,0,6),(1,0,5,12),(2,0,4,12),(3,0,1,12),(2,1,-3,12),\n (2,1,3,12),(3,0,2,12),(1,1,5,6),(1,1,-5,6),(3,0,3,12),(0,0,6,2),(2,2,0,6),(2,1,-4,12),(2,0,5,12),\n (2,2,-1,6),(2,2,1,6),(2,1,4,12),(3,1,0,12),(1,0,6,12),(2,2,2,6),(3,1,-1,12),(2,2,-2,6),(3,1,1,12),\n (3,1,-2,12),(3,0,4,12),(3,1,2,12),(1,1,-6,6),(2,2,3,6),(2,2,-3,6),(1,1,6,6))],\n }\n \n global FLnhTestData\n FLnhTestData = [{\n 'C(4,0,0)': (0.965, 0.42760447),\n 'C(2,0,0)': (1.0122, -0.80233610),\n 'C(2,0,2)': (0.0061, 8.37491546E-03),\n 'C(6,0,4)': (-0.0898, 4.37985696E-02),\n 'C(6,0,6)': (-0.1369, -9.04081762E-02),\n 'C(6,0,0)': (0.5935, -0.18234928),\n 'C(4,0,4)': (0.1872, 0.16358127),\n 'C(6,0,2)': (0.6193, 0.27573633),\n 'C(4,0,2)': (-0.1897, 0.12530720)},[1,0,0]]\ndef test0():\n if NeedTestData: TestData()\n msg = 'test cell2Gmat, fillgmat, Gmat2cell'\n for (cell, tg, tG, trcell, tV, trV) in CellTestData:\n G, g = cell2Gmat(cell)\n assert np.allclose(G,tG),msg\n assert np.allclose(g,tg),msg\n tcell = Gmat2cell(g)\n assert np.allclose(cell,tcell),msg\n tcell = Gmat2cell(G)\n assert np.allclose(tcell,trcell),msg\nif __name__ == '__main__': selftestlist.append(test0)\n\ndef test1():\n 'test cell2A and A2Gmat'\n _ReportTest()\n if NeedTestData: TestData()\n msg = 'test cell2A and A2Gmat'\n for (cell, tg, tG, trcell, tV, trV) in CellTestData:\n G, g = A2Gmat(cell2A(cell))\n assert np.allclose(G,tG),msg\n assert np.allclose(g,tg),msg\nif __name__ == '__main__': selftestlist.append(test1)\n\ndef test2():\n 'test Gmat2A, A2cell, A2Gmat, Gmat2cell'\n _ReportTest()\n if NeedTestData: TestData()\n msg = 'test Gmat2A, A2cell, A2Gmat, Gmat2cell'\n for (cell, tg, tG, trcell, tV, trV) in CellTestData:\n G, g = cell2Gmat(cell)\n tcell = A2cell(Gmat2A(G))\n assert np.allclose(cell,tcell),msg\nif __name__ == '__main__': selftestlist.append(test2)\n\ndef test3():\n 'test invcell2Gmat'\n _ReportTest()\n if NeedTestData: TestData()\n msg = 'test invcell2Gmat'\n for (cell, tg, tG, trcell, tV, trV) in CellTestData:\n G, g = invcell2Gmat(trcell)\n assert np.allclose(G,tG),msg\n assert np.allclose(g,tg),msg\nif __name__ == '__main__': selftestlist.append(test3)\n\ndef test4():\n 'test calc_rVsq, calc_rV, calc_V'\n _ReportTest()\n if NeedTestData: TestData()\n msg = 'test calc_rVsq, calc_rV, calc_V'\n for (cell, tg, tG, trcell, tV, trV) in CellTestData:\n assert np.allclose(calc_rV(cell2A(cell)),trV), msg\n assert np.allclose(calc_V(cell2A(cell)),tV), msg\nif __name__ == '__main__': selftestlist.append(test4)\n\ndef test5():\n 'test A2invcell'\n _ReportTest()\n if NeedTestData: TestData()\n msg = 'test A2invcell'\n for (cell, tg, tG, trcell, tV, trV) in CellTestData:\n rcell = A2invcell(cell2A(cell))\n assert np.allclose(rcell,trcell),msg\nif __name__ == '__main__': selftestlist.append(test5)\n\ndef test6():\n 'test cell2AB'\n _ReportTest()\n if NeedTestData: TestData()\n msg = 'test cell2AB'\n for (cell,coordlist) in CoordTestData:\n A,B = cell2AB(cell)\n for (frac,ortho) in coordlist:\n to = np.inner(A,frac)\n tf = np.inner(B,to)\n assert np.allclose(ortho,to), msg\n assert np.allclose(frac,tf), msg\n to = np.sum(A*frac,axis=1)\n tf = np.sum(B*to,axis=1)\n assert np.allclose(ortho,to), msg\n assert np.allclose(frac,tf), 
msg\nif __name__ == '__main__': selftestlist.append(test6)\n\ndef test7():\n 'test GetBraviasNum(...) and GenHBravais(...)'\n _ReportTest()\n import os.path\n import sys\n import GSASIIspc as spc\n testdir = os.path.join(os.path.split(os.path.abspath( __file__ ))[0],'testinp')\n if os.path.exists(testdir):\n if testdir not in sys.path: sys.path.insert(0,testdir)\n import sgtbxlattinp\n derror = 1e-4\n def indexmatch(hklin, hkllist, system):\n for hklref in hkllist:\n hklref = list(hklref)\n # these permutations are far from complete, but are sufficient to \n # allow the test to complete\n if system == 'cubic':\n permlist = [(1,2,3),(1,3,2),(2,1,3),(2,3,1),(3,1,2),(3,2,1),]\n elif system == 'monoclinic':\n permlist = [(1,2,3),(-1,2,-3)]\n else:\n permlist = [(1,2,3)]\n\n for perm in permlist:\n hkl = [abs(i) * hklin[abs(i)-1] / i for i in perm]\n if hkl == hklref: return True\n if [-i for i in hkl] == hklref: return True\n else:\n return False\n\n for key in sgtbxlattinp.sgtbx7:\n spdict = spc.SpcGroup(key)\n cell = sgtbxlattinp.sgtbx7[key][0]\n system = spdict[1]['SGSys']\n center = spdict[1]['SGLatt']\n\n bravcode = GetBraviasNum(center, system)\n\n g2list = GenHBravais(sgtbxlattinp.dmin, bravcode, cell2A(cell))\n\n assert len(sgtbxlattinp.sgtbx7[key][1]) == len(g2list), 'Reflection lists differ for %s' % key\n for h,k,l,d,num in g2list:\n for hkllist,dref in sgtbxlattinp.sgtbx7[key][1]: \n if abs(d-dref) < derror:\n if indexmatch((h,k,l,), hkllist, system):\n break\n else:\n assert 0,'No match for %s at %s (%s)' % ((h,k,l),d,key)\nif __name__ == '__main__': selftestlist.append(test7)\n\ndef test8():\n 'test GenHLaue'\n _ReportTest()\n import GSASIIspc as spc\n import sgtbxlattinp\n derror = 1e-4\n dmin = sgtbxlattinp.dmin\n\n def indexmatch(hklin, hklref, system, axis):\n # these permutations are far from complete, but are sufficient to \n # allow the test to complete\n if system == 'cubic':\n permlist = [(1,2,3),(1,3,2),(2,1,3),(2,3,1),(3,1,2),(3,2,1),]\n elif system == 'monoclinic' and axis=='b':\n permlist = [(1,2,3),(-1,2,-3)]\n elif system == 'monoclinic' and axis=='a':\n permlist = [(1,2,3),(1,-2,-3)]\n elif system == 'monoclinic' and axis=='c':\n permlist = [(1,2,3),(-1,-2,3)]\n elif system == 'trigonal':\n permlist = [(1,2,3),(2,1,3),(-1,-2,3),(-2,-1,3)]\n elif system == 'rhombohedral':\n permlist = [(1,2,3),(2,3,1),(3,1,2)]\n else:\n permlist = [(1,2,3)]\n\n hklref = list(hklref)\n for perm in permlist:\n hkl = [abs(i) * hklin[abs(i)-1] / i for i in perm]\n if hkl == hklref: return True\n if [-i for i in hkl] == hklref: return True\n return False\n\n for key in sgtbxlattinp.sgtbx8:\n spdict = spc.SpcGroup(key)[1]\n cell = sgtbxlattinp.sgtbx8[key][0]\n Axis = spdict['SGUniq']\n system = spdict['SGSys']\n\n g2list = GenHLaue(dmin,spdict,cell2A(cell))\n #if len(g2list) != len(sgtbxlattinp.sgtbx8[key][1]):\n # print 'failed',key,':' ,len(g2list),'vs',len(sgtbxlattinp.sgtbx8[key][1])\n # print 'GSAS-II:'\n # for h,k,l,d in g2list: print ' ',(h,k,l),d\n # print 'SGTBX:'\n # for hkllist,dref in sgtbxlattinp.sgtbx8[key][1]: print ' ',hkllist,dref\n assert len(g2list) == len(sgtbxlattinp.sgtbx8[key][1]), (\n 'Reflection lists differ for %s' % key\n )\n #match = True\n for h,k,l,d in g2list:\n for hkllist,dref in sgtbxlattinp.sgtbx8[key][1]: \n if abs(d-dref) < derror:\n if indexmatch((h,k,l,), hkllist, system, Axis): break\n else:\n assert 0,'No match for %s at %s (%s)' % ((h,k,l),d,key)\n #match = False\n #if not match: \n #for hkllist,dref in sgtbxlattinp.sgtbx8[key][1]: print ' 
',hkllist,dref\n #print center, Laue, Axis, system\nif __name__ == '__main__': selftestlist.append(test8)\n \ndef test9():\n 'test GenHLaue'\n _ReportTest()\n import GSASIIspc as G2spc\n if NeedTestData: TestData()\n for spc in LaueTestData:\n data = LaueTestData[spc]\n cell = data[0]\n hklm = np.array(data[1])\n H = hklm[-1][:3]\n hklO = hklm.T[:3].T\n A = cell2A(cell)\n dmin = 1./np.sqrt(calc_rDsq(H,A))\n SGData = G2spc.SpcGroup(spc)[1]\n hkls = np.array(GenHLaue(dmin,SGData,A))\n hklN = hkls.T[:3].T\n #print spc,hklO.shape,hklN.shape\n err = True\n for H in hklO:\n if H not in hklN:\n print ('%d %s'%(H,' missing from hkl from GSASII'))\n err = False\n assert(err)\nif __name__ == '__main__': selftestlist.append(test9)\n \n \n \n\nif __name__ == '__main__':\n # run self-tests\n selftestquiet = False\n for test in selftestlist:\n test()\n print (\"OK\")\n"
] | [
[
"numpy.linalg.inv"
],
[
"numpy.reshape",
"numpy.frombuffer",
"numpy.asarray",
"numpy.dtype"
],
[
"numpy.diag",
"numpy.dot",
"numpy.sqrt",
"numpy.arctan",
"numpy.arctan2",
"numpy.seterr",
"numpy.concatenate",
"numpy.zeros_like",
"numpy.any",
"numpy.polyval",
"numpy.roll",
"numpy.where",
"numpy.ones_like",
"numpy.allclose",
"numpy.arcsin",
"numpy.eye",
"numpy.sin",
"numpy.linalg.det",
"numpy.multiply.outer",
"numpy.copy",
"numpy.zeros",
"numpy.linalg.inv",
"numpy.rint",
"numpy.arccos",
"numpy.tan",
"numpy.linalg.eigh",
"numpy.argsort",
"numpy.array",
"numpy.sum",
"numpy.inner",
"numpy.cos",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ajeytiwary/sunpy | [
"6ba94b471f2a2e716f91ef8b8014adbef358aa6f"
] | [
"sunpy/cm/cm.py"
] | [
"\"\"\"\nThis module provides a set of colormaps specific for solar data.\n\"\"\"\nfrom __future__ import absolute_import\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\n\nfrom sunpy.cm import color_tables as ct\n\n__all__ = ['get_cmap', 'show_colormaps']\n\nsdoaia94 = ct.aia_color_table(94)\nsdoaia131 = ct.aia_color_table(131)\nsdoaia171 = ct.aia_color_table(171)\nsdoaia193 = ct.aia_color_table(193)\nsdoaia211 = ct.aia_color_table(211)\nsdoaia304 = ct.aia_color_table(304)\nsdoaia335 = ct.aia_color_table(335)\nsdoaia1600 = ct.aia_color_table(1600)\nsdoaia1700 = ct.aia_color_table(1700)\nsdoaia4500 = ct.aia_color_table(4500)\n\nsohoeit171 = ct.eit_color_table(171)\nsohoeit195 = ct.eit_color_table(195)\nsohoeit284 = ct.eit_color_table(284)\nsohoeit304 = ct.eit_color_table(304)\n\nsoholasco2 = ct.lasco_color_table(2)\nsoholasco3 = ct.lasco_color_table(3)\n\nstereocor1 = ct.cor_color_table(1)\nstereocor2 = ct.cor_color_table(2)\n\nstereohi1 = ct.stereo_hi_color_table(1)\nstereohi2 = ct.stereo_hi_color_table(2)\n\nyohkohsxtal = ct.sxt_color_table('al')\nyohkohsxtwh = ct.sxt_color_table('wh')\n\nhinodexrt = ct.xrt_color_table()\nhinodesotintensity = ct.sot_color_table('intensity')\n#hinodesotstokesquv = ct.sot_color_table('stokesQUV')\n#hinodesotmagneticf = ct.sot_color_table('magnetic field')\n#hinodesotvelocity = ct.sot_color_table('velocity')\n#hinodesotwidth = ct.sot_color_table('width')\n\ntrace171 = ct.trace_color_table('171')\ntrace195 = ct.trace_color_table('195')\ntrace284 = ct.trace_color_table('284')\ntrace1216 = ct.trace_color_table('1216')\ntrace1550 = ct.trace_color_table('1550')\ntrace1600 = ct.trace_color_table('1600')\ntrace1700 = ct.trace_color_table('1700')\ntraceWL = ct.trace_color_table('WL')\n\nhmimag = ct.hmi_mag_color_table()\n\ncmlist = {\n 'sdoaia94': sdoaia94,\n 'sdoaia131': sdoaia131,\n 'sdoaia171': sdoaia171,\n 'sdoaia193': sdoaia193,\n 'sdoaia211': sdoaia211,\n 'sdoaia304': sdoaia304,\n 'sdoaia335': sdoaia335,\n 'sdoaia1600': sdoaia1600,\n 'sdoaia1700': sdoaia1700,\n 'sdoaia4500': sdoaia4500,\n 'sohoeit171': sohoeit171,\n 'sohoeit195': sohoeit195,\n 'sohoeit284': sohoeit284,\n 'sohoeit304': sohoeit304,\n 'soholasco2': soholasco2,\n 'soholasco3': soholasco3,\n 'stereocor1': stereocor1,\n 'stereocor2': stereocor2,\n 'stereohi1': stereohi1,\n 'stereohi2': stereohi2,\n 'rhessi': cm.jet, # pylint: disable=E1101\n 'yohkohsxtal': yohkohsxtal,\n 'yohkohsxtwh': yohkohsxtwh,\n 'hinodexrt': hinodexrt,\n 'hinodesotintensity': hinodesotintensity,\n #'hinodesotstokesquv': hinodesotstokesquv,\n #'hinodesotmagneticf': hinodesotmagneticf,\n #'hinodesotvelocity': hinodesotvelocity,\n #'hinodesotwidth': hinodesotwidth,\n 'trace171': trace171,\n 'trace195': trace195,\n 'trace284': trace284,\n 'trace1216': trace1216,\n 'trace1550': trace1550,\n 'trace1600': trace1600,\n 'trace1700': trace1700,\n 'traceWL': traceWL,\n 'hmimag': hmimag\n }\n\n\ndef get_cmap(name='sdoaia94'):\n \"\"\"Get a colormap.\n\n Parameters\n ----------\n name : string\n The name of a color map.\n\n Returns\n -------\n value : matplotlib colormap\n\n See Also\n --------\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> colormap = cm.get_cmap(name = 'sdoaia94')\n\n References\n ----------\n | http://matplotlib.sourceforge.net/api/cm_api.html\n\n \"\"\"\n if name in cmlist:\n return cmlist.get(name)\n else:\n raise ValueError(\"Colormap {name!s} is not recognized\".format(name=name))\n\n\ndef show_colormaps():\n \"\"\"Displays a plot of the custom color maps supported in 
SunPy.\n\n Parameters\n ----------\n None : none\n\n Returns\n -------\n None : none\n\n See Also\n --------\n\n Examples\n --------\n >>> import sunpy.cm as cm\n >>> cm.show_colormaps()\n\n References\n ----------\n\n \"\"\"\n maps = sorted(cmlist)\n nmaps = len(maps) + 1\n\n a = np.linspace(0, 1, 256).reshape(1, -1) # pylint: disable=E1103\n a = np.vstack((a, a))\n\n fig = plt.figure(figsize=(5, 10))\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99)\n for i, name in enumerate(maps):\n ax = plt.subplot(nmaps, 1, i + 1)\n plt.axis(\"off\")\n plt.imshow(a, aspect='auto', cmap=get_cmap(name), origin='lower')\n pos = list(ax.get_position().bounds)\n fig.text(pos[0] - 0.01, pos[1], name, fontsize=10,\n horizontalalignment='right')\n\n plt.show()\n\n# def test_equalize(data):\n# \"\"\"Returns a color map which performs histogram equalization on the data.\n#\n# Parameters\n# ----------\n# data : ndarray\n#\n# Returns\n# -------\n# value : matplotlib colormap\n#\n# See Also\n# --------\n#\n# Examples\n# --------\n# >>> import sunpy.cm as cm\n# >>> cm.test_equalize()\n#\n# Reference\n# ---------\n# | http://matplotlib.sourceforge.net/api/cm_api.html\n#\n# .. warning:: this function is under development\n#\n# .. todo:: finish coding this function!\n#\n# \"\"\"\n# dfile = cbook.get_sample_data('s1045.ima', asfileobj=False)\n#\n# im = np.fromstring(file(dfile, 'rb').read(), np.uint16).astype(float)\n# im.shape = 256, 256\n#\n# #imshow(im, ColormapJet(256))\n# #imshow(im, cmap=cm.jet)\n#\n# imvals = np.sort(im.flatten())\n# lo = imvals[0]\n# hi = imvals[-1]\n# steps = (imvals[::len(imvals)/256] - lo) / (hi - lo)\n# num_steps = float(len(steps))\n# interps = [(s, idx/num_steps, idx/num_steps) for idx,\n# s in enumerate(steps)]\n# interps.append((1, 1, 1))\n# cdict = {'red': interps,\n# 'green': interps,\n# 'blue': interps}\n# histeq_cmap = colors.LinearSegmentedColormap('HistEq', cdict)\n# pylab.figure()\n# pylab.imshow(im, cmap=histeq_cmap)\n# pylab.title('histeq')\n# pylab.show()\n"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.show",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
csprock/bmdcluster | [
"0caf02bb8a93846aa679518ee6a839f843819eac",
"0caf02bb8a93846aa679518ee6a839f843819eac"
] | [
"bmdcluster/optimizers/blockdiagonalBMD.py",
"tests/test_generalBMD.py"
] | [
"import numpy as np\r\n\r\n\"\"\"\r\nThis module contains a variant of the Binary Matrix Decomposition (BMD) algorithm for clustering binary data\r\nas presented in \"A General Model for Clustering Binary Data\" (Tao Li, 2005) and \"On Clustering Binary Data\"\r\n(Tao Li & Shenghuo Zhu, 2005). This varient of the BMD algorithm is for data whose matrix is can be \r\nrearranged into block-diagonal form. That is, each set of data is associated with a set of features and vice-versa. \r\nThis module implements Algorithm 2 from Li (2005) supplemented with ideas from Li & Zhu (2005). \r\n\r\n\r\nGeneral Nomenclature:\r\n\r\n K: the number of data clusters\r\n C: the number of feature clusters\r\n n: size of data set\r\n m: number of data features\r\n W: binary data matrix\r\n Is of size n x m, with data in rows and features in columns. \r\n A: data cluster indicator matrix\r\n n x K binary indicator matrix encoding the cluster membership of the data. \r\n Each point can belong to exactly one cluster, so each row consists of zeros except for a single 1. \r\n B: feature cluster indicator matrix. \r\n m x C binary indicator matrix encoding the cluster membership of the features. \r\n Each feature can belong to exactly one cluster, so each row consists of zeros except for a single 1. \r\n X: a K x C matrix that encodes the relationship between data clusters and feature clusters.\r\n\r\n\"\"\"\r\n\r\nITER_MESSAGE = \"Iteration: {0} ............. Cost: {1:.3f}\"\r\n\r\ndef _bd_objective(A,B,W):\r\n \"\"\" Objective function for block diagonal variation of BMD.\"\"\"\r\n return np.linalg.norm(W - np.dot(A, B.T))\r\n\r\n\r\ndef _is_bd_outlier(B):\r\n \"\"\"Determines if a feature is an outlier if it is equally associated \r\n with each cluster. This is checked by seeing if all the entries in a \r\n given row of the candidate feature cluster association matrix are 1's. \r\n Any rows that meet these conditions are set to 0. (see Li and Zhu)\r\n\r\n Parameters\r\n ----------\r\n B : np.array\r\n candidate feature cluster assignment matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n feature cluster assignment matrix\r\n \"\"\"\r\n\r\n i = np.where(np.sum(B, axis=1) == B.shape[1])[0]\r\n B[i, :] = 0\r\n \r\n return B\r\n \r\n\r\ndef _d_ik(i, W, B):\r\n \"\"\" The data cluster matrix A is updated using formula 10 from Li (2005) which is the same as \r\n formula 2.3 in Li & Zhu (2005). The formula uses the squared distance between ith point and \r\n the kth cluster. The point is then assigned to the closest cluster. The squared distance\r\n between point i and data cluster k is computed by summing over the element-wise differences\r\n between the i-th row and k-th row of W and B, respectively: \r\n \r\n d[i,k] = SUM_{j in features} (W[i,j] - B[k,j])^2j\r\n \r\n Parameters\r\n ----------\r\n i : int\r\n infdex of data point\r\n W : np.array\r\n binary data matrix\r\n B : np.array\r\n feature cluster assignment matrix\r\n \r\n Returns\r\n -------\r\n int\r\n index of assigned cluster\r\n \"\"\"\r\n\r\n # Vectorized implementation to compute summations found in formula 10. 
\r\n Di = W[i,:].reshape((W.shape[1],1)) - B # broadcast i-th row of W across columns of B\r\n Di = Di*Di \r\n Di = Di.sum(axis = 0) # sum over rows (features)\r\n assigned_cluster = Di.argmin() # take index of minimum quantity to be new cluster assignment\r\n \r\n return assigned_cluster\r\n \r\n\r\n \r\n#### assign clusters ####\r\n#def ai(B,W,i):\r\n# \r\n# q = B.T - W[i,:]\r\n# q = q*q\r\n# q = q.sum(axis = 1)\r\n# return q.argmin()\r\n\r\n#########################\r\n\r\ndef _bd_updateA(A,B,W):\r\n \"\"\"Update data cluster assignment matrix A using formula 10 in Li (2005). \r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n old data cluster matrix\r\n B : np.array\r\n old feature cluster matrix\r\n W : np.array\r\n binary data matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n updated data cluster matrix\r\n \"\"\"\r\n\r\n n, K = A.shape\r\n A_new = np.zeros((n,K))\r\n \r\n for i in range(n): \r\n A_new[i,:], A_new[i, _d_ik(i, W, B)] = 0, 1\r\n \r\n return A_new\r\n\r\n\r\ndef _Y(A, W):\r\n \"\"\" The feature cluster matrix B is updated using formula 11 from Li (2005). This is done\r\n by computing a 'probability matrix' Y where the kj-th entry represents the probability\r\n feature j is in the k-th cluster. The updated matrix B is the same shape as Y and contains\r\n 1's where the corresponding entry of Y is greater than or equal to 1/2 and 0's elsewhere. \r\n (Note Li (2005) uses a strict inequality, but we have found empirically that nonstrict \r\n inequality works better.) \r\n \r\n The formula for the matrix Y is: \r\n \r\n \r\n y[i,j] = (1/n_k)*SUM_{i in data} a[i,k]*w[i,j] = (1/n_k)*( a[:,k]'w[:,j] )\r\n n_k = number of points in cluster k\r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n data cluster matrix\r\n W : np.array\r\n data matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n probability matrix\r\n \"\"\"\r\n\r\n n_k = A.sum(axis = 0) # Compute number of points in each cluster.\r\n n_k[np.where(n_k == 0)[0]] = np.inf # Set zero entries to inf to zero out reciprocal. \r\n r = 1 / n_k # Compute reciprocal. \r\n r.shape = (A.shape[1],1) # Reshape for broadcasting. \r\n\r\n return np.dot(A.T, W)*r # Compute Y matrix as dot product matrix of rows of A and W. \r\n\r\n\r\ndef _bd_updateB(A,W):\r\n \"\"\" Updated feature cluster matrix B. Applies the _Y() and B set to the matrix the same shape\r\n as Y but with 1's in the entries corresponding to where Y[>=0.5] and 0's elsewhere.\r\n \r\n Features that are associated with all clusters are 'outliers' (have a row whose entries >=0.5 in Y)\r\n Following Li and Zhu are not assigned to any clusters by setting all entries in B associated with those\r\n features to 0. \r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n old data cluster matrix\r\n W : np.array\r\n data matrix\r\n \r\n Returns\r\n -------\r\n np.array\r\n new feature cluster matrix\r\n \"\"\"\r\n\r\n \r\n Y = _Y(A, W)\r\n B_new = np.greater_equal(Y, 0.5).T # Update B matrix. 
\r\n \r\n #### setting all True rows to False ####\r\n # if feature has similar associate to all clusters, is an outlier (see Li and Zhu)\r\n # will have a row of all True by the np.greater_equal() function, reverse to make row of False\r\n \r\n # # TODO: use single outlier function and create a shared utils.py \r\n # def is_outlier(d):\r\n \r\n # if np.array_equal(d, np.array([True]*len(d))):\r\n # return np.array([False]*len(d))\r\n # else:\r\n # return d\r\n \r\n # B_new = np.apply_along_axis(is_outlier, axis = 1, arr = B_new)\r\n\r\n B_new = _is_bd_outlier(B_new)\r\n \r\n return B_new\r\n \r\n\r\ndef run_bd_BMD(A,W, max_iter=100, verbose=False):\r\n \"\"\"Executes clustering Algorithm 2 from Li (2005). \r\n \r\n Parameters\r\n ----------\r\n A : np.array\r\n initial data cluster assignment matrix\r\n W : np.array\r\n binary data matrix\r\n max_iter : int, optional\r\n maximum number of algorithm iterations, by default 100\r\n verbose : bool, optional\r\n print progress and objective function value, by default False\r\n \r\n Returns\r\n -------\r\n float\r\n final value of objective function\r\n np.array\r\n final data cluster matrix\r\n np.array\r\n final feature cluster matrix\r\n \"\"\"\r\n \r\n \r\n B = _bd_updateB(A,W)\r\n O_old = _bd_objective(A, B, W)\r\n\r\n n_iter = 0\r\n\r\n while n_iter < max_iter:\r\n A = _bd_updateA(A,B,W)\r\n B = _bd_updateB(A,W)\r\n O_new = _bd_objective(A,B,W)\r\n if O_new < O_old:\r\n O_old = O_new\r\n if verbose:\r\n print(ITER_MESSAGE.format(n_iter, O_new))\r\n n_iter += 1\r\n else:\r\n break\r\n\r\n if verbose:\r\n print(\"Convergence reached after {0} iterations\".format(n_iter+1))\r\n \r\n return O_new, A, B",
"import unittest\r\nimport numpy as np\r\n\r\nfrom .context import generalBMD\r\n\r\n# from bmdcluster.optimizers.generalBMD import run_BMD\r\n# from bmdcluster.optimizers.generalBMD import _updateB\r\n# from bmdcluster.optimizers.generalBMD import _updateA\r\n# from bmdcluster.optimizers.generalBMD import _updateX\r\n\r\n\r\nclass TestExampleDataset_General(unittest.TestCase):\r\n\r\n def setUp(self):\r\n\r\n self.W = np.loadtxt(open('tests/data/test_set_2.csv', 'r'), delimiter = ',')\r\n\r\n self.A , self.B = np.zeros((6,3)), np.zeros((6,3))\r\n\r\n for i in range(0,3): \r\n self.A[2*i, i], self.B[2*i,i] = 1, 1\r\n #self.A[0,0], self.A[2,1], self.A[4,2] = 1,1,1\r\n\r\n\r\n self.expected_X = np.array([[1,0,1],\r\n [0,1,0],\r\n [0,0,1]])\r\n\r\n\r\n self.expected_AB, j = np.zeros([6,3]), 0\r\n for i in range(0,3):\r\n self.expected_AB[j:(j+2),i] = 1\r\n j = j + 2\r\n\r\n\r\n def test_updateX(self):\r\n self.assertTrue(np.array_equal(self.expected_X,\r\n generalBMD._updateX(self.A, self.B, self.W)))\r\n\r\n def test_updateA(self):\r\n self.assertTrue(np.array_equal(self.expected_AB,\r\n generalBMD._updateA(self.A, self.B, self.expected_X, self.W)))\r\n\r\n def test_updateB(self):\r\n self.assertTrue(np.array_equal(self.expected_AB,\r\n generalBMD._updateB(self.A, self.B, self.expected_X, self.W)))\r\n\r\n def test_run_BMD(self):\r\n\r\n _, A, B, _ = generalBMD.run_BMD(self.A, self.B, self.W, verbose = 0)\r\n\r\n with self.subTest():\r\n self.assertTrue(np.array_equal(A, self.expected_AB))\r\n\r\n with self.subTest():\r\n self.assertTrue(np.array_equal(B, self.expected_AB))\r\n\r\n\r\nif __name__ == '__main__':\r\n unittest.main()"
] | [
[
"numpy.dot",
"numpy.greater_equal",
"numpy.where",
"numpy.sum",
"numpy.zeros"
],
[
"numpy.array",
"numpy.zeros",
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
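The _d_ik docstring in the record above states formula 10 as d[i,k] = SUM_j (W[i,j] - B[j,k])^2. A small self-contained check of the vectorized broadcast against the naive double loop, on toy matrices (the 4 x 3 data is made up, not from the package's tests):

    import numpy as np

    W = np.array([[1, 1, 0],
                  [1, 0, 0],
                  [0, 1, 1],
                  [0, 0, 1]])  # n=4 points, m=3 features
    B = np.array([[1, 0],
                  [1, 0],
                  [0, 1]])     # m=3 features, C=2 feature clusters

    i = 0
    Di = W[i, :].reshape((W.shape[1], 1)) - B  # broadcast row i of W across the columns of B
    Di = (Di * Di).sum(axis=0)                 # squared distance from point i to each cluster
    brute = [sum((W[i, j] - B[j, k]) ** 2 for j in range(3)) for k in range(2)]
    assert np.array_equal(Di, np.array(brute))
    print(Di.argmin())                         # point 0 is assigned to cluster 0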
Zelenyy/phd-code | [
"d5b8bfefd2418a915dde89f7da2cb6683f438556",
"d5b8bfefd2418a915dde89f7da2cb6683f438556"
] | [
"python/simulation_scripts/grep_log.py",
"python/simulation_scripts/thunderstorm/minimal_field_script_2.py"
] | [
"from phd.utils.path_tools import LogTime\nimport numpy as np\n\nlog = LogTime(\".\")\npaths = np.array(log.paths)\nindx = paths.argsort()\ncount = 1\nfor t, p in zip(log.time[indx], paths[indx]):\n if (count % 3 == 0):\n print(p,t)\n count+=1",
"import logging\nimport os\nfrom string import Template\n\nfrom dataforge import Meta\nfrom phd.thunderstorm import atmosphere\nfrom phd.thunderstorm.convert_to_hdf5 import CylinderProtoSet\nfrom phd.thunderstorm.critical_energy import get_critical_energy, CriticalEnergyProcessor, CriticalEnergyProvider\nfrom phd.thunderstorm.minimal_field import get_minimal_field\nfrom phd.utils.hdf5_tools import get_convertor, ProtoSetReader\nfrom phd.utils.run_tools import multirun_command, InputData, create_gdml, dir_name_generator, values_from_dict\n\nROOT_PATH = os.path.dirname(__file__)\n\nINPUT_TEMPLATE = \"\"\"/npm/geometry/type gdml\n/npm/geometry/gdml ${path}\n/npm/thunderstorm/physics ${physics}\n/npm/thunderstorm/minimal_energy ${min_energy} MeV\n/npm/thunderstorm/stacking/electron true\n/npm/thunderstorm/stacking/positron false\n/npm/thunderstorm/stacking/gamma false\n/npm/thunderstorm/stacking/save_gamma false\n/npm/thunderstorm/stacking/save_electron true\n/npm/thunderstorm/stacking/save_electron_cut ${min_energy} MeV\n/npm/thunderstorm/tracking/save_gamma false\n/npm/thunderstorm/tracking/save_electron true\n\n/gps/particle e-\n/gps/number 1\n/gps/direction 0 0 -1\n/gps/ene/mono 1.2 MeV\n/gps/position 0.0 0.0 499.0 m\n/run/beamOn ${number}\nexit\n\"\"\"\n\nimport numpy as np\n\n\n\ndef input_generator_critical_energy():\n gdml_template = os.path.join(ROOT_PATH, \"template\", \"critical_energy.gdml\")\n macros_template = Template(INPUT_TEMPLATE)\n count = 0\n ratio = 0.03\n cep = CriticalEnergyProvider()\n for h_index, height in enumerate(np.arange(0.0, 16000.0, 1000.0)): # 16000.0\n min_field = get_minimal_field(height)\n\n for field in np.arange(min_field, min_field*1.5, min_field*ratio):\n values_gdml = {\n 'height': height,\n 'field': field*1e-4,\n }\n critical_energy = cep.get_critical_energy(height, field*1e-4)\n paths, _ = create_gdml(gdml_template, values_gdml, h_index)\n gdml_path = paths[0]\n values = {\n \"path\": [os.path.join(\"..\",gdml_path)],\n \"physics\": [\"standard_opt_4\"],\n 'number': [1000],\n 'min_energy': [critical_energy],\n }\n\n for path, values in zip(\n dir_name_generator(\".\", \"sim\", start=count),\n values_from_dict(values)\n ):\n count += 1\n text = macros_template.substitute(values)\n input_data_meta = {\n \"macros\": values,\n \"gdml\": values_gdml\n }\n data = InputData(\n text=text,\n path=path,\n values=Meta(input_data_meta)\n )\n yield data\n\n\ndef main():\n logging.basicConfig(filename=\"run.log\")\n logging.root.setLevel(logging.DEBUG)\n\n input_data = input_generator_critical_energy()\n command = \"../../build/thunderstorm/geant4-thunderstorm.exe\"\n readers = [\n ProtoSetReader(\"stacking_simple.bin\", CylinderProtoSet),\n ProtoSetReader(\"tracking_post.bin\", CylinderProtoSet),\n ]\n multirun_command(input_data, command, post_processor=get_convertor(readers, \"./result.hdf5\", clear=True))\n return 0\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.array"
],
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
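Both scripts in the record above drive Geant4 runs by filling a macro template once per parameter point. A minimal sketch of that string.Template idiom with made-up values (the ${...} keys mirror INPUT_TEMPLATE above; the energies are illustrative only):

    from string import Template

    macro = Template('/npm/thunderstorm/minimal_energy ${min_energy} MeV\n/run/beamOn ${number}\nexit\n')
    for min_energy in (0.05, 0.5):
        text = macro.substitute({'min_energy': min_energy, 'number': 1000})
        print(text)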
binfnstats/eli5 | [
"017c738f8dcf3e31346de49a390835ffafad3f1b",
"017c738f8dcf3e31346de49a390835ffafad3f1b",
"017c738f8dcf3e31346de49a390835ffafad3f1b"
] | [
"eli5/formatters/image.py",
"eli5/_feature_names.py",
"eli5/_feature_weights.py"
] | [
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom typing import Union, Optional, Callable\n\nimport numpy as np\nfrom PIL import Image\nimport matplotlib.cm\n\nfrom eli5.base import Explanation\n\n\ndef format_as_image(expl, # type: Explanation\n resampling_filter=Image.LANCZOS, # type: int\n colormap=matplotlib.cm.viridis, # type: Callable[[np.ndarray], np.ndarray]\n alpha_limit=0.65, # type: Optional[Union[float, int]]\n ):\n # type: (...) -> Image\n \"\"\"format_as_image(expl, resampling_filter=Image.LANCZOS, colormap=matplotlib.cm.viridis, alpha_limit=0.65)\n\n Format a :class:`eli5.base.Explanation` object as an image.\n\n Note that this formatter requires ``matplotlib`` and ``Pillow`` optional dependencies.\n\n\n :param Explanation expl:\n :class:`eli5.base.Explanation` object to be formatted.\n It must have an ``image`` attribute with a Pillow image that will be overlaid.\n It must have a ``targets`` attribute, a list of :class:`eli5.base.TargetExplanation` \\\n instances that contain the attribute ``heatmap``, \\\n a rank 2 numpy array with float values in the interval [0, 1].\n Currently ``targets`` must be length 1 (only one target is supported).\n\n\n :raises TypeError: if ``heatmap`` is not a numpy array.\n :raises ValueError: if ``heatmap`` does not contain values as floats in the interval [0, 1].\n :raises TypeError: if ``image`` is not a Pillow image.\n\n :param resampling_filter:\n Interpolation ID or Pillow filter to use when resizing the image.\n\n Example filters from PIL.Image\n * ``NEAREST``\n * ``BOX``\n * ``BILINEAR``\n * ``HAMMING``\n * ``BICUBIC``\n * ``LANCZOS``\n\n See also `<https://pillow.readthedocs.io/en/stable/handbook/concepts.html#filters>`_.\n\n *Note that these attributes are integer values*.\n\n Default is ``PIL.Image.LANCZOS``.\n :type resampling_filter: int, optional\n\n :param colormap:\n Colormap scheme to be applied when converting the heatmap from grayscale to RGB.\n Either a colormap from matplotlib.cm, \n or a callable that takes a rank 2 array and \n returns the colored heatmap as a [0, 1] RGBA numpy array.\n\n Example colormaps from matplotlib.cm\n * ``viridis``\n * ``jet``\n * ``binary``\n\n See also https://matplotlib.org/gallery/color/colormap_reference.html.\n\n Default is ``matplotlib.cm.viridis`` (green/blue to yellow).\n :type colormap: callable, optional\n\n :param alpha_limit:\n Maximum alpha (transparency / opacity) value allowed \n for the alpha channel pixels in the RGBA heatmap image.\n\n Between 0.0 and 1.0.\n\n Useful when laying the heatmap over the original image, \n so that the image can be seen over the heatmap.\n\n Default is 0.65.\n\n\n :raises ValueError: if ``alpha_limit`` is outside the [0, 1] interval.\n :raises TypeError: if ``alpha_limit`` is not float, int, or None.\n :type alpha_limit: float or int, optional\n\n\n Returns\n -------\n overlay : PIL.Image.Image\n PIL image instance of the heatmap blended over the image.\n \"\"\"\n image = expl.image\n # validate image\n if not isinstance(image, Image.Image):\n raise TypeError('Explanation image must be a PIL.Image.Image instance. '\n 'Got: {}'.format(image))\n if image.mode != 'RGBA':\n # normalize to 'RGBA'\n image = image.convert('RGBA')\n\n if not expl.targets:\n # no heatmaps\n return image\n else:\n assert len(expl.targets) == 1\n heatmap = expl.targets[0].heatmap\n _validate_heatmap(heatmap)\n\n # The order of our operations is: 1. colorize 2. resize\n # as opposed: 1. resize 2. 
colorize\n\n # save the original heatmap values\n heatvals = heatmap\n # apply colours to the grayscale array\n heatmap = _colorize(heatmap, colormap=colormap) # -> rank 3 RGBA array\n\n # make the alpha intensity correspond to the grayscale heatmap values\n # cap the intensity so that it's not too opaque when near maximum value\n _update_alpha(heatmap, starting_array=heatvals, alpha_limit=alpha_limit)\n\n heatmap = expand_heatmap(heatmap, image, resampling_filter=resampling_filter)\n overlay = _overlay_heatmap(heatmap, image)\n return overlay\n\n\ndef heatmap_to_image(heatmap):\n # type: (np.ndarray) -> Image\n \"\"\"\n Convert the numpy array ``heatmap`` to a Pillow image.\n\n Parameters\n ----------\n heatmap : numpy.ndarray\n Rank 2 grayscale ('L') array or rank 3 coloured ('RGB' or RGBA') array,\n with values in interval [0, 1] as floats.\n\n\n :raises TypeError: if ``heatmap`` is not a numpy array.\n :raises ValueError: if ``heatmap`` does not contain values as floats in the interval [0, 1].\n :raises ValueError: if ``heatmap`` rank is neither 2 nor 3.\n :raises ValueError: if rank 3 ``heatmap`` does not have 4 (RGBA) or 3 (RGB) channels.\n\n\n Returns\n -------\n heatmap_image : PIL.Image.Image\n Heatmap as an image with a suitable mode.\n \"\"\"\n _validate_heatmap(heatmap)\n rank = len(heatmap.shape)\n if rank == 2:\n mode = 'L'\n elif rank == 3:\n channels = heatmap.shape[2]\n if channels == 4:\n mode = 'RGBA'\n elif channels == 3:\n mode = 'RGB'\n else:\n raise ValueError('Rank 3 heatmap must have 4 channels (RGBA), '\n 'or 3 channels (RGB). '\n 'Got shape with {} channels'.format(channels))\n else:\n raise ValueError('heatmap must have rank 2 (L, grayscale) ' \n 'or rank 3 (RGBA, colored). '\n 'Got: %d' % rank)\n heatmap = (heatmap*255).astype('uint8') # -> [0, 255] int\n return Image.fromarray(heatmap, mode=mode)\n\n\ndef _validate_heatmap(heatmap):\n \"\"\"Check that ``heatmap`` is a numpy array\n with float values between 0 and 1.\"\"\"\n if not isinstance(heatmap, np.ndarray):\n raise TypeError('heatmap must be a numpy.ndarray instance. '\n 'Got: {}'.format(heatmap))\n mi = np.min(heatmap)\n ma = np.max(heatmap)\n if not (0 <= mi and ma <= 1):\n raise ValueError('heatmap must contain float values '\n 'between 0 and 1 inclusive. 
'\n 'Got array with minimum: {} ' \n 'and maximum: {}'.format(mi, ma))\n\n\ndef _colorize(heatmap, colormap):\n # type: (np.ndarray, Callable[[np.ndarray], np.ndarray]) -> np.ndarray\n \"\"\"\n Apply the ``colormap`` function to a grayscale \n rank 2 ``heatmap`` array (with float values in interval [0, 1]).\n Returns an RGBA rank 3 array with float values in range [0, 1].\n \"\"\"\n heatmap = colormap(heatmap) # -> [0, 1] RGBA ndarray\n return heatmap\n\n\ndef _update_alpha(image_array, starting_array=None, alpha_limit=None):\n # type: (np.ndarray, Optional[np.ndarray], Optional[Union[float, int]]) -> None\n \"\"\"\n Update the alpha channel values of an RGBA rank 3 ndarray ``image_array``,\n optionally creating the alpha channel from rank 2 ``starting_array``, \n and setting upper limit for alpha values (opacity) to ``alpha_limit``.\n\n This function modifies ``image_array`` in-place.\n \"\"\"\n # FIXME: this function may be too specialized and could be refactored\n # get the alpha channel slice\n if isinstance(starting_array, np.ndarray):\n alpha = starting_array\n else:\n # take the alpha channel as is\n alpha = image_array[:,:,3]\n # set maximum alpha value\n alpha = _cap_alpha(alpha, alpha_limit)\n # update alpha channel in the original image\n image_array[:,:,3] = alpha\n\n\ndef _cap_alpha(alpha_arr, alpha_limit):\n # type: (np.ndarray, Union[None, float, int]) -> np.ndarray\n \"\"\"\n Limit the alpha values in ``alpha_arr``\n by setting the maximum alpha value to ``alpha_limit``.\n Returns a a new array with the values capped.\n \"\"\"\n if alpha_limit is None:\n return alpha_arr\n elif isinstance(alpha_limit, (float, int)):\n if 0 <= alpha_limit <= 1:\n new_alpha = np.minimum(alpha_arr, alpha_limit)\n return new_alpha\n else:\n raise ValueError('alpha_limit must be' \n 'between 0 and 1 inclusive, got: %f' % alpha_limit)\n else:\n raise TypeError('alpha_limit must be int or float,' \n 'got: {}'.format(alpha_limit))\n\n\ndef expand_heatmap(heatmap, image, resampling_filter=Image.LANCZOS):\n # type: (np.ndarray, Image, Union[None, int]) -> Image\n \"\"\"\n Resize the ``heatmap`` image array to fit over the original ``image``,\n using the specified ``resampling_filter`` method.\n The heatmap is converted to an image in the process.\n\n Parameters\n ----------\n heatmap : numpy.ndarray\n Heatmap that is to be resized, as an array.\n\n image : PIL.Image.Image\n The image whose dimensions will be resized to.\n\n resampling_filter : int or None\n Interpolation to use when resizing.\n\n See :func:`eli5.format_as_image` for more details on the `resampling_filter` parameter.\n\n\n :raises TypeError: if ``image`` is not a Pillow image instance.\n\n\n Returns\n -------\n resized_heatmap : PIL.Image.Image\n The heatmap, resized, as a PIL image.\n \"\"\"\n if not isinstance(image, Image.Image):\n raise TypeError('image must be a PIL.Image.Image instance. '\n 'Got: {}'.format(image))\n heatmap = heatmap_to_image(heatmap)\n spatial_dimensions = (image.width, image.height)\n heatmap = heatmap.resize(spatial_dimensions, resample=resampling_filter)\n return heatmap\n\n\ndef _overlay_heatmap(heatmap, image):\n # type: (Image, Image) -> Image\n \"\"\"\n Blend (combine) ``heatmap`` over ``image``, \n using alpha channel values appropriately (must have mode `RGBA`).\n Output is 'RGBA'.\n \"\"\"\n # note that the order of alpha_composite arguments matters\n overlayed_image = Image.alpha_composite(image, heatmap)\n return overlayed_image",
"import re\nimport six\nfrom typing import (\n Any, Iterable, Iterator, Tuple, Sized, List, Optional, Dict,\n Union, Callable, Pattern\n)\n\nimport numpy as np\nimport scipy.sparse as sp\n\n\nclass FeatureNames(Sized, Iterable):\n \"\"\"\n A list-like object with feature names. It allows\n feature names for unknown features to be generated using\n a provided template, and to avoid making copies of large objects\n in get_feature_names.\n \"\"\"\n def __init__(self,\n feature_names=None,\n bias_name=None, # type: str\n unkn_template=None, # type: str\n n_features=None, # type: int\n ):\n # type: (...) -> None\n if not (feature_names is not None or\n (unkn_template is not None and n_features)):\n raise ValueError(\n 'Pass feature_names or unkn_template and n_features')\n if feature_names is not None:\n if not isinstance(feature_names, (list, dict, np.ndarray)):\n raise TypeError('Unexpected feature_names type')\n if n_features is not None and n_features != len(feature_names):\n if not isinstance(feature_names, dict):\n raise ValueError(\n 'n_features should match feature_names length')\n elif unkn_template is None:\n raise ValueError(\n 'unkn_template should be set for sparse features')\n self.feature_names = feature_names\n self.unkn_template = unkn_template\n self.n_features = n_features or len(feature_names) # type: int\n self.bias_name = bias_name\n\n def __repr__(self):\n # type: () -> str\n return '<FeatureNames: {} features {} bias>'.format(\n self.n_features, 'with' if self.has_bias else 'without')\n\n def __len__(self):\n # type: () -> int\n return self.n_features + int(self.has_bias)\n\n def __iter__(self):\n # type: () -> Iterator[str]\n return (self[i] for i in range(len(self)))\n\n def __getitem__(self, idx):\n if isinstance(idx, slice):\n return self._slice(idx)\n if isinstance(idx, np.ndarray):\n return [self[i] for i in idx]\n if self.has_bias and idx == self.bias_idx:\n return self.bias_name\n if 0 <= idx < self.n_features:\n try:\n return self.feature_names[idx]\n except (TypeError, KeyError, IndexError):\n return self.unkn_template % idx\n raise IndexError('Feature index out of range')\n\n def _slice(self, aslice):\n # type: (slice) -> Any\n if isinstance(self.feature_names, (list, np.ndarray)):\n # Fast path without going through __getitem__\n if self.has_bias:\n lst = list(self.feature_names)\n lst.append(self.bias_name)\n else:\n lst = self.feature_names\n return lst[aslice]\n else:\n indices = range(len(self))[aslice]\n return [self[idx] for idx in indices]\n\n @property\n def has_bias(self):\n # type: () -> bool\n return self.bias_name is not None\n\n @property\n def bias_idx(self):\n # type: () -> Optional[int]\n if self.has_bias:\n return self.n_features\n return None\n\n def filtered(self, feature_filter, x=None):\n # type: (Callable, Any) -> Tuple[FeatureNames, List[int]]\n \"\"\" Return feature names filtered by a regular expression \n ``feature_re``, and indices of filtered elements.\n \"\"\"\n indices = []\n filtered_feature_names = []\n indexed_names = None # type: Optional[Iterable[Tuple[int, Any]]]\n if isinstance(self.feature_names, (np.ndarray, list)):\n indexed_names = enumerate(self.feature_names)\n elif isinstance(self.feature_names, dict):\n indexed_names = six.iteritems(self.feature_names)\n elif self.feature_names is None:\n indexed_names = []\n assert indexed_names is not None\n\n if x is not None:\n if sp.issparse(x) and len(x.shape) == 2:\n assert x.shape[0] == 1\n flt = lambda nm, i: feature_filter(nm, x[0, i])\n else:\n # FIXME: mypy warns about 
x[i] because it thinks x can be None\n flt = lambda nm, i: feature_filter(nm, x[i]) # type: ignore\n else:\n flt = lambda nm, i: feature_filter(nm)\n\n for idx, name in indexed_names:\n if any(flt(nm, idx) for nm in _all_feature_names(name)):\n indices.append(idx)\n filtered_feature_names.append(name)\n if self.has_bias and flt(self.bias_name, self.bias_idx):\n assert self.bias_idx is not None # for mypy\n bias_name = self.bias_name\n indices.append(self.bias_idx)\n else:\n bias_name = None\n return (\n FeatureNames(\n filtered_feature_names,\n bias_name=bias_name,\n unkn_template=self.unkn_template,\n ),\n indices)\n\n def handle_filter(self,\n feature_filter,\n feature_re, # type: Pattern[str]\n x=None, # type: Any\n ):\n # type: (...) -> Tuple[FeatureNames, Union[List[int], None]]\n if feature_re is not None and feature_filter:\n raise ValueError('pass either feature_filter or feature_re')\n if feature_re is not None:\n if x is not None:\n feature_filter = lambda name, _: re.search(feature_re, name)\n else:\n feature_filter = lambda name: re.search(feature_re, name)\n\n if feature_filter is not None:\n return self.filtered(feature_filter, x)\n else:\n return self, None\n\n def add_feature(self, feature):\n # type: (Any) -> int\n \"\"\" Add a new feature name, return it's index.\n \"\"\"\n # A copy of self.feature_names is always made, because it might be\n # \"owned\" by someone else.\n # It's possible to make the copy only at the first call to\n # self.add_feature to improve performance.\n idx = self.n_features\n if isinstance(self.feature_names, (list, np.ndarray)):\n self.feature_names = list(self.feature_names)\n self.feature_names.append(feature)\n elif isinstance(self.feature_names, dict):\n self.feature_names = dict(self.feature_names)\n self.feature_names[idx] = feature\n elif self.feature_names is None:\n self.feature_names = {idx: feature}\n self.n_features += 1\n return idx\n\n\ndef _all_feature_names(name):\n # type: (Union[str, bytes, List[Dict]]) -> List[str]\n \"\"\" All feature names for a feature: usually just the feature itself,\n but can be several features for unhashed features with collisions.\n \"\"\"\n if isinstance(name, bytes):\n return [name.decode('utf8')]\n elif isinstance(name, list):\n return [x['name'] for x in name]\n else:\n return [name]\n",
"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\n\nimport numpy as np\n\nfrom eli5.base import FeatureWeights, FeatureWeight\nfrom .utils import argsort_k_largest_positive, argsort_k_smallest, mask\n\n\ndef _get_top_features(feature_names, coef, top, x):\n \"\"\"\n Return a ``(pos, neg)`` tuple. ``pos`` and ``neg`` are lists of\n ``(name, value)`` tuples for features with positive and negative\n coefficients.\n\n Parameters:\n\n * ``feature_names`` - a vector of feature names;\n * ``coef`` - coefficient vector; coef.shape must be equal to\n feature_names.shape;\n * ``top`` can be either a number or a ``(num_pos, num_neg)`` tuple.\n If ``top`` is a number, ``top`` features with largest absolute\n coefficients are returned. If it is a ``(num_pos, num_neg)`` tuple,\n the function returns no more than ``num_pos`` positive features and\n no more than ``num_neg`` negative features. ``None`` value means\n 'no limit'.\n * ``x`` is a vector of feature values, passed to FeatureWeight.value.\n \"\"\"\n if isinstance(top, (list, tuple)):\n num_pos, num_neg = list(top) # \"list\" is just for mypy\n pos = _get_top_positive_features(feature_names, coef, num_pos, x)\n neg = _get_top_negative_features(feature_names, coef, num_neg, x)\n else:\n pos, neg = _get_top_abs_features(feature_names, coef, top, x)\n return pos, neg\n\n\ndef get_top_features(feature_names, coef, top, x=None):\n pos, neg = _get_top_features(feature_names, coef, top, x)\n pos_coef = coef > 0\n neg_coef = coef < 0\n # pos_sum = sum(w for name, w in pos or [['', 0]])\n # neg_sum = sum(w for name, w in neg or [['', 0]])\n return FeatureWeights(\n pos=pos,\n neg=neg,\n pos_remaining=pos_coef.sum() - len(pos),\n neg_remaining=neg_coef.sum() - len(neg),\n # pos_remaining_sum=coef[pos_coef].sum() - pos_sum,\n # neg_remaining_sum=coef[neg_coef].sum() - neg_sum,\n )\n\n\ndef get_top_features_filtered(x, flt_feature_names, flt_indices,\n weights, top, scale=1.0):\n if flt_indices is not None:\n _x = mask(x, flt_indices)\n weights = mask(weights, flt_indices)\n else:\n _x = x\n return get_top_features(flt_feature_names, weights * scale, top, _x)\n\n\ndef _get_top_abs_features(feature_names, coef, k, x):\n indices = argsort_k_largest_positive(np.abs(coef), k)\n features = _features(indices, feature_names, coef, x)\n pos = [fw for fw in features if fw.weight > 0]\n neg = [fw for fw in features if fw.weight < 0]\n return pos, neg\n\n\ndef _get_top_positive_features(feature_names, coef, k, x):\n indices = argsort_k_largest_positive(coef, k)\n return _features(indices, feature_names, coef, x)\n\n\ndef _get_top_negative_features(feature_names, coef, k, x):\n num_negative = (coef < 0).sum()\n k = num_negative if k is None else min(num_negative, k)\n indices = argsort_k_smallest(coef, k)\n return _features(indices, feature_names, coef, x)\n\n\ndef _features(indices, feature_names, coef, x):\n names = mask(feature_names, indices)\n weights = mask(coef, indices)\n if x is not None:\n values = mask(x, indices)\n return [FeatureWeight(name, weight, value=value)\n for name, weight, value in zip(names, weights, values)]\n else:\n return [FeatureWeight(name, weight)\n for name, weight in zip(names, weights)]\n"
] | [
[
"numpy.max",
"numpy.minimum",
"numpy.min"
],
[
"scipy.sparse.issparse"
],
[
"numpy.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
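format_as_image in the record above colorizes a [0, 1] grayscale heatmap and then caps the alpha channel so the overlay stays translucent. A numpy-only sketch of those two steps on a random toy heatmap (not a call into eli5's API):

    import numpy as np
    import matplotlib.cm

    heatmap = np.random.rand(7, 7)         # rank 2 array, floats in [0, 1]
    rgba = matplotlib.cm.viridis(heatmap)  # -> (7, 7, 4) RGBA array, floats in [0, 1]

    alpha_limit = 0.65
    rgba[:, :, 3] = np.minimum(heatmap, alpha_limit)  # grayscale value as opacity, capped
    assert rgba[:, :, 3].max() <= alpha_limit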
jason-neal/companion_simulations | [
"b5773e5539011d492b7128d0dd2778041ce50d52",
"b5773e5539011d492b7128d0dd2778041ce50d52"
] | [
"bin/coadd_chi2_db.py",
"mingle/utilities/simulation_utilities.py"
] | [
"#!/usr/bin/env python\n\"\"\"Co-add_chi2_values.py.\n\nCreate Table of minimum Chi_2 values and save to a table.\n\"\"\"\nimport argparse\nimport warnings\nimport glob\nimport os\nimport sys\n\nimport numpy as np\nimport pandas as pd\nimport sqlalchemy as sa\n\nimport simulators\n\n\ndef parse_args(args):\n \"\"\"Take care of all the argparse stuff.\n\n :returns: the args\n \"\"\"\n parser = argparse.ArgumentParser(description='Create Co-added Chi-squared db.')\n parser.add_argument('star', help='Star names')\n parser.add_argument(\"obsnum\", help=\"Observation number\")\n parser.add_argument('--suffix', default=\"\",\n help='Suffix to add to the file names.')\n parser.add_argument('-v', '--verbose', action=\"store_true\",\n help='Enable verbose.')\n parser.add_argument('-r', '--replace', action=\"store_true\",\n help='Overwrite the database if already exists.')\n parser.add_argument('-c', '--chunksize', default=1000, type=int,\n help='Chinksize for reading in csv files.')\n parser.add_argument(\"-m\", '--move', action=\"store_true\",\n help='Move original files after joining (default=False).')\n\n return parser.parse_args(args)\n\n\ndef main(star, obsnum, suffix, replace=False, verbose=False, chunksize=1000, move=False):\n \"\"\"\"\"\"\n star = star.upper()\n if suffix is None:\n suffix = \"\"\n\n patterns = [os.path.join(\n simulators.paths[\"output_dir\"], star, \"iam\",\n \"{0}-{1}_{2}_iam_chisqr_results{3}*.csv\".format(star, obsnum, chip, suffix))\n for chip in range(1, 5)]\n print(patterns)\n if (sum(1 for _ in glob.iglob(patterns[0]))) == 0:\n print(\"Patterns were not found\")\n patterns = [os.path.join(\n simulators.paths[\"output_dir\"], star, \"iam\", \"processed_csv\",\n \"{0}-{1}_{2}_iam_chisqr_results{3}*.csv\".format(star, obsnum, chip, suffix))\n for chip in range(1, 5)]\n\n print(\"new Patterns\", patterns)\n if sum(sum(1 for _ in glob.iglob(pattern)) for pattern in patterns) == 0:\n raise ValueError(\"Issue with patterns finding for {0} obs {1}\".format(star, obsnum))\n\n # Start up database\n coadd_database = os.path.join(\n simulators.paths[\"output_dir\"], star, \"iam\",\n \"{0}-{1}_coadd_iam_chisqr_results{2}.db\".format(star, obsnum, suffix))\n\n # print(\"Replace\", replace)\n print(\"os.path.isfile(coadd_database)\", os.path.isfile(coadd_database))\n if os.path.isfile(coadd_database):\n if replace:\n os.remove(coadd_database)\n else:\n raise IOError(\"The database file {0} already exists. Add the switch\"\n \" -r to replace the old database file.\".format(coadd_database))\n\n database_name = 'sqlite:///{0}'.format(coadd_database)\n engine = sa.create_engine(database_name)\n if verbose:\n print(\"csv_database =\", engine, type(engine))\n\n print(\"pattern lengths\", [sum(1 for _ in glob.iglob(pattern)) for pattern in patterns])\n\n # get list of patterns. 
and sort in order for loading in.\n detector_files = [sorted(glob.glob(pattern)) for pattern in patterns]\n\n i, j = 0, 1\n for num, files in enumerate(zip(*detector_files)):\n assert len(files) == 4\n f_0 = files[0]\n\n if \"[\" in f_0:\n n = f_0.split(\"[\")[-1]\n n = n.split(\"]\")[0]\n assert all(n in f for f in files) # All have this same host\n teff, logg, feh = [float(x) for x in n.split(\"_\")]\n if verbose:\n print(\"host params\", teff, logg, feh)\n host_flag = True\n else:\n host_flag = False\n teff, logg, feh = np.nan, np.nan, np.nan\n warnings.warn(\"No host parameter values found in file name.\")\n\n # Initalize iterators:\n iterators = [pd.read_csv(f, iterator=True, chunksize=chunksize) for f in files]\n\n while True:\n try:\n chunks = [pd_iter.get_chunk() for pd_iter in iterators]\n assert all([len(chunks[k]) == len(chunks[l])\n for k, l in ((0, 1), (1, 2), (2, 3))])\n except StopIteration:\n break\n\n joint_12 = pd.merge(chunks[0], chunks[1], how=\"outer\", suffixes=[\"_1\", \"_2\"],\n on=['teff_2', 'logg_2', 'feh_2', 'rv', 'gamma'])\n joint_34 = pd.merge(chunks[2], chunks[3], how=\"outer\", suffixes=[\"_3\", \"_4\"],\n on=['teff_2', 'logg_2', 'feh_2', 'rv', 'gamma'])\n pd_joint = pd.merge(joint_12, joint_34, how=\"outer\",\n on=['teff_2', 'logg_2', 'feh_2', 'rv', 'gamma'])\n\n # co-adding chisquare values across detectors\n pd_joint[\"coadd_chi2\"] = pd_joint[\"chi2_1\"] + pd_joint[\"chi2_2\"] + pd_joint[\"chi2_3\"] + pd_joint[\"chi2_4\"]\n pd_joint[\"coadd_npix\"] = pd_joint[\"npix_1\"] + pd_joint[\"npix_2\"] + pd_joint[\"npix_3\"] + pd_joint[\"npix_4\"]\n\n if pd_joint.isnull().values.any():\n print(pd_joint)\n assert not pd_joint.isnull().values.any(), \"There are nans in the joint DataFrame!!!\"\n\n # Adding host parameters\n pd_joint[\"teff_1\"] = teff\n pd_joint[\"logg_1\"] = logg\n pd_joint[\"feh_1\"] = feh\n pd_joint = pd_joint.rename(columns={c: c.replace(' ', '').lower() for c in pd_joint.columns})\n pd_joint.index += j\n\n i += 1\n pd_joint.to_sql('chi2_table', engine, if_exists='append')\n j = pd_joint.index[-1] + 1\n if verbose:\n print(\"Indicies = \", i, j)\n\n if move:\n for f in files:\n f_split = os.path.split(f) # [\"head\", \"tail\"]\n new_f = os.path.join(f_split[0], \"processed_csv\", f_split[1])\n os.makedirs(os.path.dirname(new_f), exist_ok=True)\n os.rename(f, new_f)\n\n if verbose:\n print(\"Reached end of part =\", num)\n\n if verbose:\n print(\"Completed coadd db creation\")\n\n return None\n\nif __name__ == \"__main__\":\n args = vars(parse_args(sys.argv[1:]))\n\n opts = {k: args[k] for k in args}\n main(**opts)\n print(\"\\nNow use coadd_analysis.py\")\n",
"#!/usr/bin/env python\n\"\"\"Simulation Utilities.\"\"\"\n\n# File to contain function necessary for the chi_square simulations\n\nimport copy\nimport logging\n\nimport numpy as np\n\n\ndef add_noise(flux, snr, use_mu=False):\n \"\"\"Using the formulation 1/sigma (default) or mu/sigma from wikipedia.\n\n https://en.wikipedia.org/wiki/Signal-to-noise_ratio#Alternative_definition\n\n Applies noise based on the flux at each pixel.\n \"\"\"\n if not snr:\n logging.warning(\"Assuming SNR=0 means add no noise\")\n return flux\n else:\n if use_mu:\n sigma = np.median(flux) / snr\n else:\n sigma = np.ones_like(flux) / snr\n\n # Add normal distributed noise at the snr level.\n noisy_flux = flux + np.random.normal(0, sigma)\n return noisy_flux\n\n\ndef combine_spectra(star, planet, alpha):\n \"\"\"Combine the Spectrum objects \"star\" and \"planet\".\n\n Strength ratio of alpha\n spec = star + planet * alpha\n\n \"\"\"\n star = copy.copy(star)\n planet = copy.copy(planet)\n\n if np.all(star.xaxis == planet.xaxis): # make sure wavelengths even first\n pass\n else:\n planet.interpolate1d_to(star)\n # combined_spectrum = star + (planet*alpha)\n # Combined spectra with proper normalization\n norm_factor = 1 / (1 + alpha)\n combined_spectrum = (star + (planet * alpha)) * norm_factor\n\n return combined_spectrum\n\n\ndef spec_max_delta(obs_spec, rvs, gammas):\n \"\"\"Calculate max doppler shift of a spectrum.\"\"\"\n return max_delta(obs_spec.xaxis, rvs, gammas)\n\n\ndef max_delta(wavelength, rvs, gammas):\n \"\"\"Calculate max doppler shift.\n\n Given a spectrum, and some doppler shifts, find the wavelength limit\n to have full coverage without much wastage computations.\n\n # Currently set at 2*delta.\n \"\"\"\n check_inputs(rvs)\n check_inputs(gammas)\n\n shift_max = np.max(np.abs(rvs)) + np.max(np.abs(gammas))\n\n obs_limits = np.array([np.min(wavelength), np.max(wavelength)])\n\n delta = [lim * shift_max / 299792.458 for lim in obs_limits]\n\n return 2 * round(max(delta), 3)\n\n\ndef check_inputs(var):\n \"\"\"Turn inputs into numpy arrays.\n\n Defaults to zero if None.\n \"\"\"\n if (var is None) or (\"None\" in str(var)):\n var = np.array([0])\n elif isinstance(var, (np.float, np.int)):\n var = np.asarray([var], dtype=np.float32)\n\n if len(var) == 0: # Empty sequence\n raise ValueError(\"Empty variable vector. Check config.yaml\\n\"\n \"var = {0}\".format(var))\n return var\n"
] | [
[
"pandas.merge",
"pandas.read_csv"
],
[
"numpy.ones_like",
"numpy.abs",
"numpy.min",
"numpy.asarray",
"numpy.median",
"numpy.all",
"numpy.max",
"numpy.random.normal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
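add_noise in the record above draws Gaussian noise with per-pixel sigma = 1/SNR (or median(flux)/SNR when use_mu is set). A quick standalone check of the default branch on a flat toy spectrum:

    import numpy as np

    flux = np.ones(100000)
    snr = 100
    sigma = np.ones_like(flux) / snr           # sigma = 1/SNR at every pixel
    noisy = flux + np.random.normal(0, sigma)  # noise applied per pixel
    print(noisy.std())                         # ~0.01, i.e. 1/snr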
baronrustamov/pytext | [
"9790943736e7c0ac53095be2e20177be6fc529a9",
"9790943736e7c0ac53095be2e20177be6fc529a9",
"9790943736e7c0ac53095be2e20177be6fc529a9",
"9790943736e7c0ac53095be2e20177be6fc529a9"
] | [
"pytext/data/masked_util.py",
"pytext/models/decoders/intent_slot_model_decoder.py",
"pytext/models/representations/transformer/residual_mlp.py",
"pytext/models/module.py"
] | [
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nfrom typing import Dict, List, Optional, Set\n\nimport numpy as np\nfrom pytext.common.constants import SpecialTokens, Token\nfrom pytext.config.component import Component, ComponentType\nfrom pytext.config.pytext_config import ConfigBase\nfrom pytext.data.data_structures.annotation import Annotation, Intent, Root, Slot\nfrom pytext.data.utils import VocabBuilder, Vocabulary\n\n\nclass MaskedVocabBuilder(VocabBuilder):\n def __init__(self, delimiter=\" \"):\n super().__init__(delimiter)\n self.use_mask = True\n\n\nSPECIAL_TOKENS: Dict[str, Token] = {\n str(SpecialTokens.MASK): SpecialTokens.MASK,\n str(SpecialTokens.BOS): SpecialTokens.BOS,\n str(SpecialTokens.EOS): SpecialTokens.EOS,\n}\n\n\nclass MaskingFunction(Component):\n class Config(ConfigBase):\n pass\n\n __EXPANSIBLE__ = True\n __COMPONENT_TYPE__ = ComponentType.MASKING_FUNCTION\n\n @classmethod\n def from_config(cls, config, use_bos, use_eos):\n return cls(use_bos, use_eos)\n\n def __init__(self, use_bos, use_eos):\n self.use_bos = use_bos\n self.use_eos = use_eos\n\n def should_mask(self, *args, **kwargs) -> bool:\n return True\n\n def gen_masked_source_target(self, tokens, *args, **kwargs):\n raise NotImplementedError()\n\n def _prepare_dec_target(\n self, dec_source: List[int], clean_input_tokens: List[int], vocab: Vocabulary\n ) -> List[int]:\n dec_target = [\n vocab.get_pad_index()\n if dec_source_token != vocab.get_mask_index()\n else dec_real_target_token\n for (dec_source_token, dec_real_target_token) in zip(\n dec_source, clean_input_tokens\n )\n ]\n\n return dec_target\n\n\nclass TreeMask(MaskingFunction):\n class Config(ConfigBase):\n accept_flat_intents_slots: bool = True\n factor: int = 2\n\n @classmethod\n def from_config(cls, config, use_bos, use_eos):\n return cls(config.accept_flat_intents_slots, config.factor, use_bos, use_eos)\n\n def __init__(self, accept_flat_intents_slots, factor, use_bos, use_eos):\n super().__init__(use_bos, use_eos)\n self.accept_flat_intents_slots = accept_flat_intents_slots\n self.factor = factor\n\n def clean_eos_bos(self, tokens):\n start_index, end_index = 0, len(tokens)\n if self.use_bos:\n start_index = 1\n if self.use_eos:\n end_index = -1\n return tokens[start_index:end_index]\n\n def gen_masked_tree(self, node, mask_token, depth=1):\n if self.should_mask(depth):\n actual_str_len = len(node.flat_str().strip().split(\" \"))\n return \" \".join([mask_token for idx in range(actual_str_len)])\n else:\n return_str = \" \"\n if (\n isinstance(node, Intent)\n or isinstance(node, Slot)\n or isinstance(node, Root)\n ):\n return_str += \"[\"\n return_str += node.label\n return_str += \" \"\n for child in node.children:\n return_str += self.gen_masked_tree(child, mask_token, depth + 1)\n return_str += \" \"\n return_str += \"]\"\n else:\n return_str += node.label\n return_str += \" \"\n return return_str.strip()\n\n def should_mask(self, depth=1):\n return np.random.random() < 1.0 / (self.factor ** depth)\n\n def gen_masked_source_target(self, tokens: List[int], vocab: Vocabulary):\n cleaned_tokens = self.clean_eos_bos(tokens)\n original_target_string = \" \".join(\n [vocab[idx] for idx in cleaned_tokens]\n ).upper()\n try:\n annotation = Annotation(\n original_target_string,\n accept_flat_intents_slots=self.accept_flat_intents_slots,\n )\n except Exception as e:\n # This should never happen other than when testing\n print(e, original_target_string)\n dec_source = [vocab.idx[vocab.mask_token] 
for _ in range(len(tokens))]\n dec_target = [vocab.idx[vocab.pad_token] for _ in range(len(tokens))]\n return dec_source, dec_target\n assert len(annotation.root.children) == 1\n mask_tree_str = self.gen_masked_tree(\n annotation.root.children[0], vocab.mask_token\n )\n\n # We are calling the .split() instead of the tokenize() of tensorizer\n # because the input str contains special MASK token __MASK__\n # It we call tokenize() on this input_str, it may lower __MASK__ or split\n # in unexpected ways causing issues.\n # Hence temporary workaround is that we call split(\" \") and lower all tokens\n # other than MASK tokens\n\n # handle special tokens in vocab\n mask_tree_str: List[str] = list(\n map(\n lambda token: SPECIAL_TOKENS.get(token, token.lower()),\n mask_tree_str.split(\" \"),\n )\n )\n\n dec_source = [vocab.idx.get(t) for t in mask_tree_str]\n\n dec_target = self._prepare_dec_target(dec_source, cleaned_tokens, vocab)\n\n if self.use_bos:\n if self.should_mask():\n dec_source.insert(0, vocab.get_mask_index())\n dec_target.insert(0, vocab.get_bos_index())\n else:\n dec_source.insert(0, vocab.get_bos_index())\n dec_target.insert(0, vocab.get_pad_index())\n\n if self.use_eos:\n if self.should_mask():\n dec_source.append(vocab.get_mask_index())\n dec_target.append(vocab.get_eos_index())\n else:\n dec_source.append(vocab.get_eos_index())\n dec_target.append(vocab.get_pad_index())\n return dec_source, dec_target\n\n\nclass MaskEverything(MaskingFunction):\n def gen_masked_tree(self, node, mask_token, depth=1):\n actual_str_len = len(node.flat_str().strip().split(\" \"))\n return \" \".join([mask_token for idx in range(actual_str_len)])\n\n def gen_masked_source_target(self, tokens, vocab: Vocabulary):\n dec_source: List[int] = [vocab.get_mask_index() for idx in tokens]\n dec_target = self._prepare_dec_target(dec_source, tokens, vocab)\n return dec_source, dec_target\n\n\nclass RandomizedMaskingFunction(MaskingFunction):\n class Config(MaskingFunction.Config):\n seed: Optional[int] = None\n minimum_masks: int = 1\n\n @classmethod\n def from_config(cls, config: Config, use_bos: bool, use_eos: bool):\n return cls(config.seed, config.minimum_masks, use_bos, use_eos)\n\n def __init__(\n self, seed: Optional[int], minimum_masks: int, use_bos: bool, use_eos: bool\n ):\n super().__init__(use_bos, use_eos)\n self.random = np.random.RandomState(seed)\n self.minimum_masks = minimum_masks\n\n def gen_masked_source_target(self, tokens: List[int], vocab: Vocabulary):\n num_masks = self.random.randint(self.minimum_masks, len(tokens))\n\n ind: Set[int] = set(\n self.random.choice(len(tokens), size=num_masks, replace=False)\n )\n\n dec_source: List[int] = [\n vocab.get_mask_index() if idx in ind else token\n for idx, token in enumerate(tokens)\n ]\n\n dec_target = self._prepare_dec_target(dec_source, tokens, vocab)\n\n return dec_source, dec_target\n\n\nclass NoOpMaskingFunction(MaskingFunction):\n class Config(MaskingFunction.Config):\n seed: Optional[int] = None\n minimum_masks: int = 1\n\n @classmethod\n def from_config(cls, config: Config, use_bos: bool, use_eos: bool):\n return cls(config.seed, config.minimum_masks, use_bos, use_eos)\n\n def __init__(\n self, seed: Optional[int], minimum_masks: int, use_bos: bool, use_eos: bool\n ):\n super().__init__(use_bos, use_eos)\n self.random = np.random.RandomState(seed)\n self.minimum_masks = minimum_masks\n\n def gen_masked_source_target(self, tokens: List[int], vocab: Vocabulary):\n dec_target = self._prepare_dec_target(tokens, tokens, vocab)\n\n return 
tokens, dec_target\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nfrom typing import List, Optional, Tuple\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom pytext.models.module import create_module\nfrom pytext.utils.usage import log_class_usage\n\nfrom .decoder_base import DecoderBase\nfrom .mlp_decoder import MLPDecoder\n\n\nclass IntentSlotModelDecoder(DecoderBase):\n \"\"\"\n `IntentSlotModelDecoder` implements the decoder layer for intent-slot models.\n Intent-slot models jointly predict intent and slots from an utterance.\n At the core these models learn to jointly perform document classification\n and word tagging tasks.\n\n `IntentSlotModelDecoder` accepts arguments for decoding both document\n classification and word tagging tasks, namely, `in_dim_doc` and `in_dim_word`.\n\n Args:\n config (type): Configuration object of type IntentSlotModelDecoder.Config.\n in_dim_doc (type): Dimension of input Tensor for projecting document\n representation.\n in_dim_word (type): Dimension of input Tensor for projecting word\n representation.\n out_dim_doc (type): Dimension of projected output Tensor for document\n classification.\n out_dim_word (type): Dimension of projected output Tensor for word tagging.\n\n Attributes:\n use_doc_probs_in_word (bool): Whether to use intent probabilities for\n predicting slots.\n doc_decoder (type): Document/intent decoder module.\n word_decoder (type): Word/slot decoder module.\n\n \"\"\"\n\n class Config(DecoderBase.Config):\n \"\"\"\n Configuration class for `IntentSlotModelDecoder`.\n\n Attributes:\n use_doc_probs_in_word (bool): Whether to use intent probabilities\n for predicting slots.\n \"\"\"\n\n use_doc_probs_in_word: bool = False\n doc_decoder: MLPDecoder.Config = MLPDecoder.Config()\n word_decoder: MLPDecoder.Config = MLPDecoder.Config()\n\n def __init__(\n self,\n config: Config,\n in_dim_doc: int,\n in_dim_word: int,\n out_dim_doc: int,\n out_dim_word: int,\n ) -> None:\n super().__init__(config)\n\n self.use_doc_probs_in_word = config.use_doc_probs_in_word\n\n self.doc_decoder = create_module(\n config.doc_decoder, in_dim=in_dim_doc, out_dim=out_dim_doc\n )\n if self.use_doc_probs_in_word:\n in_dim_word += out_dim_doc\n\n self.word_decoder = create_module(\n config.word_decoder, in_dim=in_dim_word, out_dim=out_dim_word\n )\n log_class_usage(__class__)\n\n def forward(\n self, x_d: torch.Tensor, x_w: torch.Tensor, dense: Optional[torch.Tensor] = None\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n if dense is not None:\n logit_d = self.doc_decoder(torch.cat((x_d, dense), 1))\n else:\n logit_d = self.doc_decoder(x_d)\n\n if self.use_doc_probs_in_word:\n # Get doc probability distribution\n doc_prob = F.softmax(logit_d, 1)\n word_input_shape = x_w.size()\n doc_prob = doc_prob.unsqueeze(1).repeat(1, word_input_shape[1], 1)\n x_w = torch.cat((x_w, doc_prob), 2)\n\n if dense is not None:\n word_input_shape = x_w.size()\n dense = dense.unsqueeze(1).repeat(1, word_input_shape[1], 1)\n x_w = torch.cat((x_w, dense), 2)\n\n return logit_d, self.word_decoder(x_w)\n\n def get_decoder(self) -> List[nn.Module]:\n \"\"\"Returns the document and word decoder modules.\"\"\"\n return [self.doc_decoder, self.word_decoder]\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nfrom typing import List\n\nfrom pytext.utils.usage import log_class_usage\nfrom torch import nn\nfrom torch.nn import functional as F\n\n\nclass GeLU(nn.Module):\n \"\"\"Component class to wrap F.gelu.\"\"\"\n\n def forward(self, input):\n return F.gelu(input)\n\n\nclass ResidualMLP(nn.Module):\n \"\"\"A square MLP component which can learn a bias on an input vector.\n This MLP in particular defaults to using GeLU as its activation function\n (this can be changed by passing a different activation function),\n and retains a residual connection to its original input to help with gradient\n propogation.\n\n Unlike pytext's MLPDecoder it doesn't currently allow adding a LayerNorm\n in between hidden layers.\n \"\"\"\n\n def __init__(\n self,\n input_dim: int,\n hidden_dims: List[int],\n dropout: float = 0.1,\n activation=GeLU,\n ):\n super().__init__()\n modules = []\n for last_dim, dim in zip([input_dim] + hidden_dims, hidden_dims):\n modules.extend(\n [nn.Linear(last_dim, dim), activation(), nn.Dropout(dropout)]\n )\n\n last_dim = hidden_dims[-1] if hidden_dims else input_dim\n # Unlike normal PyText mlp, we don't put an activation layer at the end.\n modules.extend([nn.Linear(last_dim, input_dim), nn.Dropout(dropout)])\n\n self.mlp = nn.Sequential(*modules)\n log_class_usage(__class__)\n\n def forward(self, input):\n bias = self.mlp(input)\n return input + bias\n",
"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\n\nimport zipfile\nfrom typing import Dict\n\nimport torch\nimport torch.jit\nimport torch.nn as nn\nfrom pytext.config.component import Component, ComponentType, create_component\nfrom pytext.config.module_config import ModuleConfig\nfrom pytext.utils.file_io import PathManager\nfrom pytext.utils.usage import log_class_usage\n\n\nSHARED_MODULE_REGISTRY: Dict[str, torch.nn.Module] = {}\n\n\ndef _create_module_from_registry(module_config, *args, **kwargs):\n return create_component(ComponentType.MODULE, module_config, *args, **kwargs)\n\n\ndef create_module(\n module_config, *args, create_fn=_create_module_from_registry, **kwargs\n):\n \"\"\"Create module object given the module's config object. It depends on the\n global shared module registry. Hence, your module must be available for the\n registry. This entails that your module must be imported somewhere in the\n code path during module creation (ideally in your model class) for the module\n to be visible for registry.\n\n Args:\n module_config (type): Module config object.\n create_fn (type): The function to use for creating the module. Use this\n parameter if your module creation requires custom code and pass your\n function here. Defaults to `_create_module_from_registry()`.\n\n Returns:\n type: Description of returned object.\n\n \"\"\"\n # the first module with a given shared_module_key and type is saved in\n # SHARED_MODULE_REGISTRY. The rest will reuse the saved module and thus\n # share parameters.\n shared_module_key = getattr(module_config, \"shared_module_key\", None)\n typed_shared_module_key = (shared_module_key, type(module_config))\n load_path = getattr(module_config, \"load_path\", None)\n module = SHARED_MODULE_REGISTRY.get(typed_shared_module_key)\n\n if not module:\n if load_path:\n with PathManager.open(load_path, \"rb\") as load_file:\n loaded_module = torch.load(load_file, map_location=\"cpu\")\n\n if isinstance(loaded_module, dict):\n # Loaded module is a state dict\n module = create_fn(module_config, *args, **kwargs)\n module.load_state_dict(loaded_module)\n else:\n # Loaded module is a torchscripted module\n module = loaded_module\n\n name = type(module).__name__\n print(f\"Loaded state of module {name} from {load_path} ...\")\n\n else:\n module = create_fn(module_config, *args, **kwargs)\n\n name = type(module).__name__\n if getattr(module_config, \"freeze\", False):\n print(f\"Freezing the parameters of module {name} ...\")\n module.freeze()\n if shared_module_key:\n SHARED_MODULE_REGISTRY[typed_shared_module_key] = module\n module.save_path = getattr(module_config, \"save_path\", None)\n return module\n\n\nclass Module(nn.Module, Component):\n \"\"\"Generic module class that serves as base class for all PyText modules.\n\n Args:\n config (type): Module's `config` object. Specific contents of this object\n depends on the module. Defaults to None.\n\n \"\"\"\n\n Config = ModuleConfig\n\n __COMPONENT_TYPE__ = ComponentType.MODULE\n\n def __init__(self, config=None) -> None:\n nn.Module.__init__(self)\n Component.__init__(self, config)\n log_class_usage(__class__)\n\n def freeze(self) -> None:\n for param in self.parameters():\n param.requires_grad = False\n"
] | [
[
"numpy.random.RandomState",
"numpy.random.random"
],
[
"torch.nn.functional.softmax",
"torch.cat"
],
[
"torch.nn.Linear",
"torch.nn.functional.gelu",
"torch.nn.Dropout",
"torch.nn.Sequential"
],
[
"torch.nn.Module.__init__",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
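RandomizedMaskingFunction in the record above swaps a random subset of token positions for the mask index and builds the target by keeping the real token only at masked positions. A toy reenactment with plain ints standing in for the vocab (MASK=1 and PAD=0 are assumed indices here, not pytext's actual vocab layout):

    import numpy as np

    tokens = [11, 12, 13, 14, 15]
    MASK, PAD = 1, 0
    rng = np.random.RandomState(0)
    num_masks = rng.randint(1, len(tokens))  # at least minimum_masks=1 positions
    ind = set(rng.choice(len(tokens), size=num_masks, replace=False))

    dec_source = [MASK if i in ind else t for i, t in enumerate(tokens)]
    dec_target = [t if s == MASK else PAD for s, t in zip(dec_source, tokens)]
    print(dec_source, dec_target)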
XavierValParejo/SeedBot | [
"7b338184ac9137027c726c43b481c2f79ad12b51"
] | [
"Code/Mapping/ultrasound_mapping.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport lidar_to_grid_map as lg\nfrom grid_mapping_for_a_star import OccupancyGridMap\nfrom a_star_for_ogm_testing import a_star\n\nf = \"mesures.txt\"\n\ndef read_measures(file):\n measures = [line.split(\",\") for line in open(file)]\n angles = []\n distances = []\n for measure in measures:\n angles.append(float(measure[0]))\n distances.append(float(measure[1]))\n ang = np.array(angles)\n dist = np.array(distances)\n return dist,ang\n\ndef map_surroundings(dist):\n xyreso = 0.02 # x-y grid resolution\n yawreso = math.radians(3.1) # yaw angle resolution [rad]\n ox = np.sin(ang) * dist\n oy = np.cos(ang) * dist\n pmap, minx, maxx, miny, maxy, xyreso = lg.generate_ray_casting_grid_map(ox, oy, xyreso, False)\n xyres = np.array(pmap).shape\n return pmap\n\ndef input_points(pmap):\n for x in dist:\n x = x / 10\n ogm = OccupancyGridMap(pmap, 1)\n path, path_idx = a_star((25,30), (50,40),ogm)\n xPath, yPath = zip(*path)\n return ogm\n\ndist, ang = read_measures(f)\nmapp = map_surroundings(dist)\ntipus_=input_points(mapp)"
] | [
[
"numpy.array",
"numpy.cos",
"numpy.sin"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
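map_surroundings in the record above converts each (angle, distance) ultrasound measure to cartesian obstacle coordinates before ray-casting the grid map. The conversion on its own, with a few hand-made measures (angles in degrees are an assumption about mesures.txt):

    import numpy as np

    ang = np.radians([0.0, 90.0, 180.0])
    dist = np.array([1.0, 2.0, 1.5])
    ox = np.sin(ang) * dist  # x offsets of the detected obstacles
    oy = np.cos(ang) * dist  # y offsets (angle measured from the y axis)
    print(np.round(ox, 3), np.round(oy, 3))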
vivid-k/code | [
"c39d5a0ba219b499e595812a31362a8f2535859e",
"c39d5a0ba219b499e595812a31362a8f2535859e"
] | [
"AREL-data-process/Data_process.py",
"models/model_utils.py"
] | [
"import json\nimport os.path as osp\nimport matplotlib.pyplot as plt\nfrom collections import OrderedDict\nfrom collections import Counter\nimport numpy\nimport h5py\nimport os\n\n\n\"\"\"\n处理文本数据,提取出story,并构建词表\n\"\"\"\nbase_path = \"AREL-data-process/\"\ntrain_data = json.load(open(osp.join(base_path, \"test.story-in-sequence.json\")))\n# train_data = None\nval_data = json.load(open(osp.join(base_path, \"val.story-in-sequence.json\")))\ntest_data = json.load(open(osp.join(base_path, \"test.story-in-sequence.json\")))\n\n### 处理图像数据\nprefix = [\"train\", \"val\", \"test\"]\nwhole_album2im = {}\nfor i, data in enumerate([train_data, val_data, test_data]):\n album2im = {} # 按照album存储图像数据,键为album_id,值为img_id,1-多\n for im in data['images']: # 遍历每一张图像\n if im['id'] == '210929621':\n print(im)\n if im['album_id'] not in album2im: # 以album区分,若album_id并未存储,则为新的album\n album2im[im['album_id']] = [im['id']]\n else: # 该album已存在,则append,注意数据已经按照时间排序\n if im['id'] not in album2im[im['album_id']]:\n album2im[im['album_id']].append(im['id'])\n whole_album2im[prefix[i]] = album2im\n\nfor i, data in enumerate([train_data, val_data, test_data]):\n a = [] # 按照album存储图像数据,键为album_id,值为img_id,1-多\n for im in data['images']: # 遍历每一张图像\n if im['id'] not in a: # 以album区分,若album_id并未存储,则为新的album\n a.append(im['id'])\n print(len(a))\n\n### 处理文本数据\nwhole_album = {}\nstory_lines = {} # 存储每个故事,每个故事五句话,index为0、5、10\nwhole_lines = {} # 存储每个故事,一行存储,index为0、1、2\nstory_line_count = 0 # 句子数量\nwhole_line_count = 0 # story数量\nfor i, data in enumerate([train_data, val_data, test_data]):\n album_mapping = {} # 存储story\n for annot_new in data[\"annotations\"]: # 遍历每组数据\n annot = annot_new[0] # album_id\n assert len(annot_new) == 1\n text = bytes.decode(annot['text'].encode('utf8')) # 字段中包含origin_text和text,前者为原始文本,后者为匿名文本\n if annot['story_id'] not in album_mapping: # story_id为这段描述的id(5张图片)\n album_mapping[annot['story_id']] = {\"text_index\": [story_line_count], \"flickr_id\": [annot['photo_flickr_id']], \"length\": 1, \n \"album_id\": annot['album_id'], \"album_flickr_id\": whole_album2im[prefix[i]][annot['album_id']],\n \"whole_text_index\": whole_line_count, \"origin_text\": text} # story_line_count表示一个句子的id,flickr_id为图片id,album_id,album_flickr_id对应album对应的图像列表,whole_text_index<story_line_count即为story数量\n story_lines[annot['story_id']] = [{\"index\": story_line_count, \"text\": text.split()}]\n whole_lines[annot['story_id']] = {\"index\": whole_line_count, \"text\": text.split()}\n whole_line_count +=1\n else:\n album_mapping[annot['story_id']][\"text_index\"].append(story_line_count)\n album_mapping[annot['story_id']][\"flickr_id\"].append(annot['photo_flickr_id'])\n album_mapping[annot['story_id']][\"length\"] += 1 # length计算当前story长度,1-5\n story_lines[annot['story_id']].append({\"index\": story_line_count, \"text\": text.split()}) \n whole_lines[annot['story_id']][\"text\"].extend(text.split())\n album_mapping[annot['story_id']][\"origin_text\"] += \" \" + text\n story_line_count += 1\n whole_album[prefix[i]] = album_mapping\n\nnew_story_lines = [] \nfor l in story_lines.values():\n for li in l:\n new_story_lines.append(li)\nstory_lines = new_story_lines\nwhole_lines = whole_lines.values()\n\nstory_lines = [r['text'] for r in sorted(story_lines, key=lambda thing: thing['index'])] # 一个句子存储一行\nwhole_lines = [r['text'] for r in sorted(whole_lines, key=lambda thing: thing['index'])] # 一个故事存储一行\n\nprint(len(story_lines))\nprint(len(whole_lines))\n\n\ncnt = Counter() # 可以进行计数,词表构建(词:出现次数)\nfor l in story_lines:\n words = l\n for 
w in words:\n cnt[w] += 1\nwords2id = {}\nidx = 2\n## 构建词表\nfor k, v in cnt.most_common():\n if v > 5:\n words2id[k] = idx\n idx += 1\nwords2id[\"<EOS>\"] = 0\nwords2id[\"<UNK>\"] = 1\nid2words = {v:k for k,v in words2id.items()}\nprint(len(id2words))\n\nwhole_album[\"words2id\"] = words2id\nwhole_album[\"id2words\"] = {v:k for k,v in words2id.items()}\n\n# 将文本转换为id\nid_story_lines = []\nfor l in story_lines:\n s = [words2id[w] if w in words2id else 1 for w in l]\n id_story_lines.append(s)\n\nid_whole_lines = []\nfor l in whole_lines:\n s = [words2id[w] if w in words2id else 1 for w in l]\n id_whole_lines.append(s)\n\n# 进行padding,padding为0,长度为105\nnew_id_whole_lines = []\nspecify_longest = 105\nfor i in range(len(id_whole_lines)):\n cur_len = len(id_whole_lines[i])\n if cur_len < specify_longest:\n new_id_whole_lines.append(id_whole_lines[i] + [0] * (specify_longest - cur_len))\n else:\n new_id_whole_lines.append(id_whole_lines[i][:specify_longest-1] + [0])\n# shape(50200,105)\ndata = numpy.asarray(new_id_whole_lines)\n\n# f = h5py.File(\"full_story.h5\", \"w\")\n# f.create_dataset(\"story\", data=data)\n# f.close()\n## 对单个句子进行padding,大小为30\nnew_id_story_lines = []\nspecify_longest = 30\nfor i in range(len(id_story_lines)):\n cur_len = len(id_story_lines[i])\n if cur_len < specify_longest:\n new_id_story_lines.append(id_story_lines[i] + [0] * (specify_longest - cur_len))\n else:\n new_id_story_lines.append(id_story_lines[i][:specify_longest-1] + [0])\n## (25100,30)\ndata = numpy.asarray(new_id_story_lines, \"int32\")\n\n# f = h5py.File(\"story.h5\", \"w\")\n# f.create_dataset(\"story\", data=data)\n# f.close()\n\n# # 删除图像少于5张的\n# for p in prefix:\n# path = \"/mnt/sshd/wenhuchen/VIST/images_256/{}/\".format(p)\n# deletables = []\n# for story_id, story in whole_album[p].items():\n# d = [osp.exists(osp.join(path, \"{}.jpg\".format(_))) for _ in story[\"flickr_id\"]]\n# if sum(d) < 5:\n# print(\"deleting {}\".format(story_id))\n# deletables.append(story_id)\n# else:\n# pass\n# for i in deletables:\n# del whole_album[p][i]\n\n# 构建图像与story的映射\nflickr_story_map = {}\nfor pre in prefix:\n album = whole_album[pre]\n for k, v in album.items():\n indexes = v['text_index']\n for i, flickr_id in enumerate(v['flickr_id']):\n if flickr_id not in flickr_story_map:\n flickr_story_map[flickr_id] = [indexes[i]]\n else:\n flickr_story_map[flickr_id].append(indexes[i])\n\n# 画出story的长度分布\n# length_distribution = [len(s) for s in whole_lines]\n# result = plt.hist(length_distribution, bins='auto', cumulative=True, normed=1)\n# plt.show()\n# length_distribution = [len(s) for s in story_lines]\n# result = plt.hist(length_distribution, bins='auto', cumulative=True, normed=1)\n# plt.hist(length_distribution, bins='auto')\n# plt.show()\n\n\n\"\"\"\n处理文本数据,提取出caption\n\"\"\"\nbase_path = \"AREL-data-process/dii/\"\ntrain_data = json.load(open(osp.join(base_path, \"train.description-in-isolation.json\")))\nval_data = json.load(open(osp.join(base_path, \"val.description-in-isolation.json\")))\ntest_data = json.load(open(osp.join(base_path, \"test.description-in-isolation.json\")))\n\nmapping = {}\nmapping_original = {}\ntext_list = []\ntext_list_count = 0\nunknown_words = 0\ntotal_words = 0\nwith_story = 0\nno_story = 0\nfor i, data in enumerate([train_data, val_data, test_data]):\n mapping[prefix[i]] = {}\n mapping_original[prefix[i]] = {}\n for l in data['annotations']:\n if l[0]['photo_flickr_id'] not in mapping[prefix[i]]:\n if l[0]['photo_flickr_id'] in flickr_story_map:\n stories = 
flickr_story_map[l[0]['photo_flickr_id']]\n else:\n stories = [-1]\n mapping[prefix[i]][l[0]['photo_flickr_id']] = {'caption': [text_list_count], 'story': stories}\n mapping_original[prefix[i]][l[0]['photo_flickr_id']] = [l[0]['text']]\n else:\n mapping[prefix[i]][l[0]['photo_flickr_id']]['caption'].append(text_list_count)\n mapping_original[prefix[i]][l[0]['photo_flickr_id']].append(l[0]['text'])\n text_list_count += 1\n assert len(l) == 1\n s = []\n for w in l[0]['text'].split(\" \"):\n if w in words2id:\n s.append(words2id[w]) \n else:\n s.append(1)\n unknown_words += 1\n total_words += 1\n text_list.append(s)\nfor pre in prefix:\n count = 0\n for i in mapping[pre]:\n value = mapping[pre][i]\n if len(value['caption']) == 0:\n count += 1\n print(count)\n\nprint(\"unknown words percent is {}\".format(unknown_words / (total_words + 0.0)))\nnew_text_list = []\nspecify_longest = 20\nfor i in range(len(text_list)):\n cur_len = len(text_list[i])\n if cur_len < specify_longest:\n new_text_list.append(text_list[i] + [0] * (specify_longest - cur_len))\n else:\n new_text_list.append(text_list[i][:specify_longest - 1] + [0]) \n\n# for p in prefix:\n# path = \"/mnt/sshd/wenhuchen/VIST/images_256/{}/\".format(p)\n# deletables = []\n# for flickr_id, story in mapping[p].items():\n# if not osp.exists(osp.join(path, \"{}.jpg\".format(flickr_id))):\n# deletables.append(flickr_id)\n# for i in deletables:\n# del mapping[p][i]\n# del mapping_original[p][i]\n \nwhole_album[\"image2caption\"] = mapping\nwhole_album[\"image2caption_original\"] = mapping_original\n\n# with open(\"story_line.json\", 'w') as f:\n# json.dump(whole_album, f)\n\ntext_array = numpy.asarray(new_text_list, dtype='int32')\n\n# f = h5py.File(\"description.h5\", 'w')\n# f.create_dataset(\"story\", data=text_array)\n# f.close()\n\nval_data = json.load(open(osp.join(base_path, \"val.description-in-isolation.json\")))\nwith open(\"val_desc_reference\", \"w\") as f:\n for l in val_data['annotations']:\n # print >> f, \"{}\\t{}\".format(l[0]['photo_flickr_id'], l[0]['text'])\n print(l[0]['photo_flickr_id'], l[0]['text'])\n\nf = h5py.File(\"full_story.h5\", \"r\")\nprint(f['story'][0])\n\nf = h5py.File(\"story.h5\", \"r\")\nprint(f['story'].shape)\n\nf = open(\"story_line.json\", 'r')\ndata = json.load(f)\nprint(len(data['id2words']))\n\n# zero_fc = numpy.zeros((2048, ), \"float32\")\n# zero_conv = numpy.zeros((2048, 7, 7), \"float32\")\n\n# train_fc_base = \"/mnt/sshd/xwang/VIST/feature/train/fc\"\n# train_conv_base = \"/mnt/sshd/xwang/VIST/feature/train/conv\"\n# train_name1 = [l.split(\".\")[0] for l in os.listdir(train_fc_base)]\n\n# train_image_base = \"/mnt/sshd/wenhuchen/VIST/images/train\"\n# train_name2 = [l.split(\".\")[0] for l in os.listdir(train_image_base)]\n\n# rest = set(train_name2) - set(train_name1)\n# for image in rest:\n# numpy.save(os.path.join(train_fc_base, \"{}.npy\".format(image)), zero_fc) \n# numpy.save(os.path.join(train_conv_base, \"{}.npy\".format(image)), zero_conv) \n\n# val_fc_base = \"/mnt/sshd/xwang/VIST/feature/val/fc\"\n# val_conv_base = \"/mnt/sshd/xwang/VIST/feature/val/conv\"\n# val_name1 = [l.split(\".\")[0] for l in os.listdir(val_fc_base)]\n\n# val_image_base = \"/mnt/sshd/wenhuchen/VIST/images/val\"\n# val_name2 = [l.split(\".\")[0] for l in os.listdir(val_image_base)]\n\n# rest = set(val_name2) - set(val_name1)\n# for image in rest:\n# numpy.save(os.path.join(val_fc_base, \"{}.npy\".format(image)), zero_fc) \n# numpy.save(os.path.join(val_conv_base, \"{}.npy\".format(image)), zero_conv) \n\n# 
test_fc_base = \"/mnt/sshd/xwang/VIST/feature/test/fc\"\n# test_conv_base = \"/mnt/sshd/xwang/VIST/feature/test/conv\"\n# test_name1 = [l.split(\".\")[0] for l in os.listdir(test_fc_base)]\n\n# test_image_base = \"/mnt/sshd/wenhuchen/VIST/images/test\"\n# test_name2 = [l.split(\".\")[0] for l in os.listdir(test_image_base)]\n\n# rest = set(test_name2) - set(test_name1)\n# for image in rest:\n# numpy.save(os.path.join(test_fc_base, \"{}.npy\".format(image)), zero_fc) \n# numpy.save(os.path.join(test_conv_base, \"{}.npy\".format(image)), zero_conv) \n\n# with open(\"story_line.json\", 'r') as f: \n# data = json.load(f)\n\n# print(len(data['image2caption']['train']))\n# print(len(data['train']))",
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport numpy as np\nimport time\nfrom models.attention import luong_gate_attention, MultiHeadAttention, FocusAttention\n\ndef get_sinusoid_encoding_table(d_hid, n_position=5):\n ''' Sinusoid position encoding table '''\n\n def cal_angle(position, hid_idx):\n return position / np.power(10000, 2 * (hid_idx // 2) / d_hid)\n\n def get_posi_angle_vec(position):\n return [cal_angle(position, hid_j) for hid_j in range(d_hid)]\n\n sinusoid_table = np.array([get_posi_angle_vec(pos_i) for pos_i in range(n_position)])\n\n sinusoid_table[:, 0::2] = np.sin(sinusoid_table[:, 0::2]) # dim 2i\n sinusoid_table[:, 1::2] = np.cos(sinusoid_table[:, 1::2]) # dim 2i+1\n\n return torch.FloatTensor(sinusoid_table)\nclass PositionwiseFeedForward(nn.Module):\n ''' A two-feed-forward-layer module '''\n\n def __init__(self, d_in, d_hid, dropout=0.1):\n super().__init__()\n self.w_1 = nn.Conv1d(d_in, d_hid, 1) # position-wise\n self.w_2 = nn.Conv1d(d_hid, d_in, 1) # position-wise\n self.layer_norm = nn.LayerNorm(d_in)\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x):\n residual = x\n output = x.transpose(1, 2)\n output = self.w_2(F.relu(self.w_1(output)))\n output = output.transpose(1, 2)\n output = self.dropout(output)\n output = self.layer_norm(output + residual)\n return output\n\nclass EncoderLayer(nn.Module):\n ''' Compose with two layers '''\n\n def __init__(self, d_model, d_inner, n_head=8, d_k=64, d_v=64, dropout=0.1):\n super(EncoderLayer, self).__init__()\n self.slf_attn = MultiHeadAttention(\n n_head, d_model, d_k, d_v, dropout=dropout)\n self.pos_ffn = PositionwiseFeedForward(d_model, d_inner, dropout=dropout)\n\n def forward(self, enc_input):\n enc_output, enc_slf_attn = self.slf_attn(\n enc_input, enc_input, enc_input)\n enc_output = self.pos_ffn(enc_output)\n\n return enc_output, enc_slf_attn\n\nclass AttentionLayer(nn.Module):\n def __init__(self, hidden_dim_en, hidden_dim_de, projected_size):\n super(AttentionLayer, self).__init__()\n self.linear1 = nn.Linear(hidden_dim_en, projected_size)\n self.linear2 = nn.Linear(hidden_dim_de, projected_size)\n self.linear3 = nn.Linear(projected_size, 1, False)\n\n def forward(self, out_e, h):\n '''\n out_e: batch_size * num_frames * en_hidden_dim\n h : batch_size * de_hidden_dim\n '''\n assert out_e.size(0) == h.size(0)\n batch_size, num_frames, _ = out_e.size()\n hidden_dim = h.size(1)\n\n h_att = h.unsqueeze(1).expand(batch_size, num_frames, hidden_dim)\n x = F.tanh(F.dropout(self.linear1(out_e)) + F.dropout(self.linear2(h_att)))\n x = F.dropout(self.linear3(x))\n a = F.softmax(x.squeeze(2))\n\n return a\n\ndef graph_attn(alpha, cen_state, adj_state, max_len):\n \"\"\"\n graph attention. 
calculate the graph attention score for cen_state\n Args:\n alpha: float hyper parameters\n cen_state: tensor acts as central node batch_size * 1 * hidden_dim\n other_state: tensor acts as adjacent node batch_size * max_len * hidden_dim\n M: tensor learned param matrix hidden_dim * hidden_dim\n max_len: int maximum number of adjacent node\n Returns:\n socre: tensor batch_size * max_len\n \"\"\"\n batch_size = cen_state.shape[0]\n hidden_dim = cen_state.shape[-1]\n # concatenate 将解码节点与编码节点拼接,构成图\n state = torch.cat((cen_state.unsqueeze(1), adj_state), dim=1) # batch_size * max_len + 1 * hidden_dim\n \n M = nn.Linear(hidden_dim, hidden_dim).cuda() \n W = M(state) # batch_size * max_len + 1 * hidden_dim\n W = torch.matmul(state, W.transpose(1, 2)) # batch_size * max_len + 1 * max_len + 1\n \n W_sum = torch.sum(W, dim=2) # batch_size * max_len + 1\n W_sum = torch.unsqueeze(W_sum, -1) # batch_size * max_len + 1 * 1\n W_sum = W_sum.repeat((1, 1, max_len + 1)) # batch_size * max_len + 1 * max_len + 1\n \n D = torch.eye(max_len + 1).cuda() # max_len + 1 * max_len + 1\n D = torch.unsqueeze(D, 0) # 1 * max_len + 1 * max_len + 1\n D = D.repeat((batch_size, 1, 1)) * W_sum # batch_size * max_len + 1 * max_len + 1 点乘\n P = alpha * torch.matmul(W, torch.inverse(D[:])) # batch_size * max_len + 1 * max_len + 1\n\n I = torch.unsqueeze(torch.eye(max_len + 1), 0).cuda() # 1 * max_len + 1 * max_len + 1\n I = I.repeat(batch_size, 1, 1) - P # batch_size * max_len + 1 * max_len + 1\n Q = torch.inverse(I[:]) # batch_size * max_len + 1 * max_len + 1\n \n Y = torch.cat((torch.ones((batch_size, 1)), torch.zeros(batch_size, max_len)), 1).cuda() # batch_size * max_len + 1\n Y = torch.unsqueeze(Y, -1)\n score = (1 - alpha) * torch.matmul(Q, Y)\n score = F.softmax(score, dim=1)\n # score_mask = (score.squeeze() - last) > 0\n # score_mask = score_mask.float()\n # score = (score.squeeze() - last) * score_mask\n # score_sum = score.sum(1).unsqueeze(1)\n # score = score[:] / score_sum[:]\n state = torch.matmul(state.transpose(1, 2), score).squeeze() # 64*512*1\n\n return state\n\ndef _smallest(matrix, k, only_first_row=False):\n # matrix : beam*vocab(记录了到当前步骤的总cost) k:beam\n # 选取beam个最小的\n if only_first_row: # 是否为第一个词,第一个词概率都相同,取第一行即可\n flatten = matrix[:1, :].flatten() # 取出第一行概率分布,9837\n else:\n flatten = matrix.flatten()\n args = np.argpartition(flatten, k)[:k] # 比第三名好的放在数组前面,差的放在后面,无序,返回索引\n args = args[np.argsort(flatten[args])] # 取出相应的值并排序,argsort返回下标,args取出相应索引值\n # 返回值:前面返回matrix中的位置,最后一个返回概率最大的三个值\n return np.unravel_index(args, matrix.shape), flatten[args] # 前面函数计算args在matrix维度的矩阵中位置\n\nclass VisualEncoder(nn.Module):\n\n def __init__(self, opt):\n super(VisualEncoder, self).__init__()\n self.feat_size = opt.feat_size # 2048\n self.embed_dim = opt.word_embed_dim # 512\n\n self.rnn_type = opt.rnn_type # gru\n self.num_layers = opt.num_layers # 1\n self.hidden_dim = opt.hidden_dim # 512\n self.dropout = opt.visual_dropout # 0.2\n self.story_size = opt.story_size # 5\n self.with_position = opt.with_position # False\n self.opt = opt\n # visual embedding layer\n self.visual_emb = nn.Sequential(nn.Linear(self.feat_size, self.embed_dim),\n nn.BatchNorm1d(self.embed_dim),\n nn.ReLU(True))\n self.hin_dropout_layer = nn.Dropout(self.dropout)\n\n if self.rnn_type == 'gru':\n self.rnn = nn.GRU(input_size=self.embed_dim, hidden_size=self.hidden_dim,\n dropout=self.dropout, batch_first=True, bidirectional=True)\n elif self.rnn_type == 'lstm':\n self.rnn = nn.LSTM(input_size=self.embed_dim, hidden_size=self.hidden_dim,\n 
dropout=self.dropout, batch_first=True, bidirectional=True)\n else:\n raise Exception(\"RNN type is not supported: {}\".format(self.rnn_type))\n self.rnn_dec = nn.LSTM(input_size=self.embed_dim, hidden_size=self.hidden_dim,\n dropout=self.dropout, batch_first=True, bidirectional=False)\n if self.opt.mem:\n # self.linear_read = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.Sigmoid())\n self.linear_read = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.Sigmoid())\n if self.opt.is_write:\n self.linear_write = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.Sigmoid())\n self.linear_mem = nn.Linear(self.hidden_dim * 2, self.hidden_dim)\n # self.linear_q = nn.Linear(self.hidden_dim, self.hidden_dim)\n # self.linear_fun = nn.Sequential(nn.Linear(self.hidden_dim * 2, self.hidden_dim),\n # nn.BatchNorm1d(self.hidden_dim),\n # nn.ReLU(True))\n if self.opt.context_dec: \n # self.attention = MultiHeadAttention(8, self.hidden_dim, 64, 64)\n # self.pos_ffn = PositionwiseFeedForward(self.hidden_dim, 2048)\n # self.layer_stack = nn.ModuleList([EncoderLayer(self.hidden_dim, 2048) for _ in range(3)])\n # self.position_enc = nn.Embedding.from_pretrained(get_sinusoid_encoding_table(self.hidden_dim), freeze=True)\n self.attention = luong_gate_attention(self.hidden_dim, self.embed_dim)\n # self.transformer = nn.TransformerEncoderLayer(512, 8)\n # self.transformer_encoder = nn.TransformerEncoder(self.transformer, 6)\n # self.linear_fun = nn.Linear(self.hidden_dim * 2, self.hidden_dim)\n if self.opt.swish:\n self.sw1 = nn.Sequential(nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=1, padding=0), nn.BatchNorm1d(self.hidden_dim), nn.ReLU())\n self.sw3 = nn.Sequential(nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=1, padding=0), nn.ReLU(), nn.BatchNorm1d(self.hidden_dim),\n nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=3, padding=1), nn.ReLU(), nn.BatchNorm1d(self.hidden_dim))\n self.sw33 = nn.Sequential(nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=1, padding=0), nn.ReLU(), nn.BatchNorm1d(self.hidden_dim),\n nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=3, padding=1), nn.ReLU(), nn.BatchNorm1d(self.hidden_dim),\n nn.Conv1d(self.hidden_dim, self.hidden_dim, kernel_size=3, padding=1), nn.ReLU(), nn.BatchNorm1d(self.hidden_dim))\n self.linear = nn.Sequential(nn.Linear(2*self.hidden_dim, 2*self.hidden_dim), nn.GLU(), nn.Dropout(self.dropout))\n self.filter_linear = nn.Linear(3*self.hidden_dim, self.hidden_dim)\n self.tanh = nn.Tanh()\n self.sigmoid = nn.Sigmoid()\n if self.opt.att:\n if self.opt.multihead:\n self.focus = MultiHeadAttention(self.hidden_dim, num_heads=self.opt.num_heads)\n else:\n self.attention = luong_gate_attention(self.hidden_dim, self.embed_dim)\n # self.focus = FocusAttention(self.hidden_dim)\n ### test\n # self.out_linear = nn.Linear(5*self.hidden_dim, self.hidden_dim)\n if self.opt.with_position:\n self.position_embed = nn.Embedding(self.story_size, self.embed_dim)\n # else:\n \n # # 线性层 + 门控\n # self.linear_read = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.Sigmoid())\n # self.linear_write = nn.Sequential(nn.Linear(self.hidden_dim, self.hidden_dim), nn.Sigmoid())\n # self.linear_mem = nn.Linear(self.hidden_dim * 2, self.hidden_dim)\n \n self.project_layer = nn.Linear(self.hidden_dim * 2, self.embed_dim)\n self.relu = nn.ReLU()\n\n def init_hidden(self, batch_size, bi, dim):\n # LSTM的初始隐状态,默认为0\n weight = next(self.parameters()).data\n times = 2 if bi else 1\n if self.rnn_type == 'gru':\n 
return weight.new(self.num_layers * times, batch_size, dim).zero_()\n else:\n return (weight.new(self.num_layers * times, batch_size, dim).zero_(),\n weight.new(self.num_layers * times, batch_size, dim).zero_())\n\n def forward(self, input, hidden=None):\n\n batch_size, story_size = input.size(0), input.size(1) # (batch_size, 5, feat_size)\n emb = self.visual_emb(input.view(-1, self.feat_size)) # 过一个线性层,2048-512\n emb = emb.view(batch_size, story_size, -1) # view变回三维 64*5*512\n\n # if self.opt.context_dec:\n # position = torch.tensor(list(range(story_size))).repeat(batch_size, 1).cuda()\n # position = self.position_enc(position)\n # enc_output = emb + position\n # # context, attention = self.attention(attin, attin, attin)\n\n # # att = self.pos_ffn(context)\n # for enc_layer in self.layer_stack:\n # enc_output, enc_slf_attn = enc_layer(enc_output)\n # # emb = self.transformer_encoder(attin)\n # emb = enc_output\n\n rnn_input = self.hin_dropout_layer(emb) # apply dropout\n # if hidden is None:\n # hidden = self.init_hidden(batch_size, bi=True, dim=self.hidden_dim // 2) # 最后一个维度为512/2=256\n \n houts, hidden = self.rnn(rnn_input) # hidden [2,64,512]\n \n out = emb + self.project_layer(houts)# 原始的 visual_emb + rnn输出的结果, 即残差连接, 改为concat?\n\n out = self.relu(out) # (batch_size, 5, embed_dim)\n state = (hidden[0].unsqueeze(0), hidden[1].unsqueeze(0))\n if self.opt.with_position:\n position = torch.tensor([[0,1,2,3,4]]).repeat(batch_size, 1).cuda()\n pos = self.position_embed(position)\n out = out + pos\n heads = None\n if self.opt.dec:\n result = [] \n if self.opt.mem: \n if self.opt.att: \n if self.opt.multihead:\n mem, query, heads = self.focus(out, out) # heads 64*4*128\n else:\n mem, query = self.focus(out, out)\n else:\n mem = out\n mem = mem.sum(dim=1) # 64*512\n # self.attention.init_context(out)\n for i in range(self.story_size):\n # graph_res = graph_attn(self.opt.alpha, state[0].squeeze(), out, self.story_size) # 64*6*1\n # graph_res = torch.matmul(out.transpose(1, 2), weights).squeeze()\n # att, _ = self.attention(state[0].squeeze())\n # g_r = self.linear_read(torch.cat([state[0].squeeze(), query], dim=-1))\n g_w = torch.ones(batch_size, state[0].size(-1)).cuda()\n \n g_r = self.linear_read(state[0].squeeze())\n a = torch.mean(g_r, dim=-1)\n m = g_w * g_r\n b = torch.mean(torch.mean(g_r, dim=-1))\n f = torch.max(g_r, dim=-1)\n mem_inp = g_r * mem\n inp = torch.cat((out[:, i, :], mem_inp), 1)\n inp = self.linear_mem(inp).unsqueeze(1) # 64*1*512\n output, state = self.rnn_dec(inp, state)\n if self.opt.is_write:\n g_w = self.linear_write(state[0].squeeze())\n mem = g_w * mem\n # mem -= mem_inp\n c = torch.mean(g_w, dim=-1)\n e = torch.max(g_w, dim=-1)\n d = torch.mean(torch.mean(g_w, dim=-1)) \n result.append(output.squeeze())\n out = torch.stack(result).transpose(0, 1)\n else:\n for i in range(self.story_size):\n output, state = self.rnn_dec(out[:, i, :].unsqueeze(1), state)\n result.append(output.squeeze())\n out = torch.stack(result).transpose(0, 1)\n\n return out, state, heads\n\nclass CaptionEncoder(nn.Module):\n\n def __init__(self, opt):\n super(CaptionEncoder, self).__init__()\n # embedding (input) layer options\n self.opt = opt\n self.embed_dim = opt.word_embed_dim\n \n # rnn layer options\n self.rnn_type = opt.rnn_type\n self.num_layers = opt.num_layers\n self.hidden_dim = opt.hidden_dim\n self.dropout = opt.visual_dropout\n self.story_size = opt.story_size\n if self.opt.cnn_cap:\n self.sw1 = nn.Sequential(nn.Conv1d(opt.hidden_dim, opt.hidden_dim, kernel_size=1, padding=0), 
nn.BatchNorm1d(opt.hidden_dim), nn.ReLU())\n self.sw3 = nn.Sequential(nn.Conv1d(opt.hidden_dim, opt.hidden_dim, kernel_size=1, padding=0), nn.ReLU(), nn.BatchNorm1d(opt.hidden_dim),\n nn.Conv1d(opt.hidden_dim, opt.hidden_dim, kernel_size=3, padding=1), nn.ReLU(), nn.BatchNorm1d(opt.hidden_dim))\n self.sw33 = nn.Sequential(nn.Conv1d(opt.hidden_dim, opt.hidden_dim, kernel_size=1, padding=0), nn.ReLU(), nn.BatchNorm1d(opt.hidden_dim),\n nn.Conv1d(opt.hidden_dim, opt.hidden_dim, kernel_size=3, padding=1), nn.ReLU(), nn.BatchNorm1d(opt.hidden_dim),\n nn.Conv1d(opt.hidden_dim, opt.hidden_dim, kernel_size=3, padding=1), nn.ReLU(), nn.BatchNorm1d(opt.hidden_dim))\n self.linear = nn.Sequential(nn.Linear(2*opt.hidden_dim, 2*opt.hidden_dim), nn.GLU(), nn.Dropout(opt.dropout))\n self.filter_linear = nn.Linear(3*opt.hidden_dim, opt.hidden_dim)\n self.tanh = nn.Tanh()\n self.sigmoid = nn.Sigmoid()\n self.cnn = nn.Sequential(nn.Conv1d(opt.hidden_dim, opt.hidden_dim, kernel_size=3, padding=1), nn.ReLU(), nn.BatchNorm1d(opt.hidden_dim))\n else:\n self.rnn = nn.GRU(input_size=self.embed_dim, hidden_size=self.hidden_dim, bidirectional=opt.bi, batch_first=True)\n if opt.bi:\n self.out_linear = nn.Sequential(nn.Linear(2*opt.hidden_dim, 2*opt.hidden_dim), nn.GLU(), nn.Dropout(opt.dropout))\n self.sigmoid = nn.Sigmoid()\n self.attention = luong_gate_attention(self.hidden_dim, self.embed_dim)\n\n def forward(self, input, embed):\n # input: 64*5*20,分别对每句话进行卷积,提取句子特征\n batch = input.size(0)\n input = input.view(batch*5, -1)\n mask = torch.zeros_like(input)\n mask = input > 0 # batch*5,20\n src_len = torch.sum(mask, dim=-1) # =batch*5\n input = embed(input)\n state = None\n if self.opt.cnn_cap:\n input = input.transpose(1, 2) # 320*512*20,卷积在最后一个维度扫\n\n # outputs = self.cnn(input)\n # outputs = outputs.transpose(1, 2)\n\n conv1 = self.sw1(input)\n conv3 = self.sw3(input)\n conv33 = self.sw33(input)\n conv = torch.cat((conv1, conv3, conv33), 1).transpose(1, 2)\n conv = self.filter_linear(conv) # 320*20*512\n outputs = conv\n if self.opt.self_att: # 对句子内部进行自注意力,提取关键词特征\n self.attention.init_context(input.transpose(1, 2).transpose(0, 1))\n att_out, weights = self.attention(input.transpose(1, 2), selfatt=True)\n gate = self.sigmoid(att_out.transpose(0, 1))\n outputs = gate * conv\n else: \n ## gru+self-att\n # lengths, indices = torch.sort(src_len, dim=0, descending=True)\n # input = torch.index_select(input, dim=0, index=indices) # batch*5,20,512\n # embs = torch.nn.utils.rnn.pack_padded_sequence(input, lengths, batch_first=True)\n # outputs, state = self.rnn(embs)\n # outputs = torch.nn.utils.rnn.pad_packed_sequence(outputs)[0] # 19,batch*5,512\n # outputs = outputs.transpose(0,1)\n # # 排列为之前的顺序\n # _, ind = torch.sort(indices)\n # outputs = torch.index_select(outputs, dim=0, index=ind) # batch*5,seq_len,512\n # state = torch.index_select(state.squeeze(), dim=0, index=ind)\n # if self.opt.self_att:\n # self.attention.init_context(outputs.transpose(0, 1))\n # outputs, weights = self.attention(outputs, selfatt=True)\n # outputs = outputs.transpose(0, 1)\n\n ## 先self——att,再gru\n if self.opt.self_att:\n self.attention.init_context(input.transpose(0, 1))\n outputs, weights = self.attention(input, selfatt=True)\n outputs = outputs.transpose(0, 1)\n else:\n outputs = input\n lengths, indices = torch.sort(src_len, dim=0, descending=True)\n outputs = torch.index_select(outputs, dim=0, index=indices) # batch*5,20,512\n embs = torch.nn.utils.rnn.pack_padded_sequence(outputs, lengths, batch_first=True)\n outputs, state = 
self.rnn(embs) # state(2,320,512)\n outputs = torch.nn.utils.rnn.pad_packed_sequence(outputs)[0] # 19,batch*5,512\n if self.opt.bi:\n outputs = self.out_linear(outputs.transpose(0,1))\n else:\n outputs = outputs.transpose(0,1)\n # 排列为之前的顺序\n _, ind = torch.sort(indices)\n outputs = torch.index_select(outputs, dim=0, index=ind) # batch*5,seq_len,512\n state = torch.index_select(state[0].squeeze(), dim=0, index=ind)\n\n return outputs, state\n\n\ndef graph_attention(alpha, cen_state, adj_state, max_len):\n \"\"\"\n graph attention. calculate the graph attention score for cen_state\n\n Args:\n alpha: float hyper parameters\n cen_state: tensor acts as central node batch_size * 1 * hidden_dim\n other_state: tensor acts as adjacent node batch_size * max_len * hidden_dim\n M: tensor learned param matrix hidden_dim * hidden_dim\n max_len: int maximum number of adjacent node\n Returns:\n socre: tensor batch_size * max_len\n \"\"\"\n batch_size = cen_state.shape[0]\n hidden_dim = cen_state.shape[-1]\n # concatenate 将解码节点与编码节点拼接,构成图\n state = torch.cat((cen_state.unsqueeze(1), adj_state), dim=1) # batch_size * max_len + 1 * hidden_dim\n \n M = nn.Linear(hidden_dim, hidden_dim).cuda() \n W = M(state) # batch_size * max_len + 1 * hidden_dim\n W = torch.matmul(state, W.transpose(1, 2)) # batch_size * max_len + 1 * max_len + 1\n \n W_sum = torch.sum(W, dim=2) # batch_size * max_len + 1\n W_sum = torch.unsqueeze(W_sum, -1) # batch_size * max_len + 1 * 1\n W_sum = W_sum.repeat((1, 1, max_len + 1)) # batch_size * max_len + 1 * max_len + 1\n \n D = torch.eye(max_len + 1).cuda() # max_len + 1 * max_len + 1\n D = torch.unsqueeze(D, 0) # 1 * max_len + 1 * max_len + 1\n D = D.repeat((batch_size, 1, 1)) * W_sum # batch_size * max_len + 1 * max_len + 1 点乘\n P = alpha * torch.matmul(W, torch.inverse(D[:])) # batch_size * max_len + 1 * max_len + 1\n\n I = torch.unsqueeze(torch.eye(max_len + 1), 0).cuda() # 1 * max_len + 1 * max_len + 1\n I = I.repeat(batch_size, 1, 1) - P # batch_size * max_len + 1 * max_len + 1\n Q = torch.inverse(I[:]) # batch_size * max_len + 1 * max_len + 1\n \n Y = torch.cat((torch.ones((batch_size, 1)), torch.zeros(batch_size, max_len)), 1).cuda() # batch_size * max_len + 1\n Y = torch.unsqueeze(Y, -1)\n score = (1 - alpha) * torch.matmul(Q, Y)\n score = F.softmax(score[:, 1:], dim=1) # 64*6*1\n\n return score\n\n"
] | [
[
"numpy.asarray"
],
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.nn.GLU",
"torch.max",
"torch.zeros",
"torch.cat",
"torch.nn.GRU",
"torch.sum",
"torch.nn.Embedding",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.FloatTensor",
"torch.nn.Dropout",
"torch.ones",
"torch.eye",
"torch.nn.utils.rnn.pack_padded_sequence",
"numpy.sin",
"torch.inverse",
"torch.nn.Sigmoid",
"torch.tensor",
"numpy.argpartition",
"torch.sort",
"numpy.unravel_index",
"torch.index_select",
"torch.nn.BatchNorm1d",
"numpy.power",
"torch.zeros_like",
"torch.unsqueeze",
"torch.nn.Linear",
"torch.nn.Conv1d",
"torch.stack",
"numpy.argsort",
"torch.nn.LSTM",
"numpy.cos",
"torch.nn.LayerNorm",
"torch.nn.Tanh",
"torch.matmul",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
neuralmagic/yolact | [
"68ea8f6edcc0d61047a95071fa22d8d271164605"
] | [
"layers/box_utils.py"
] | [
"# -*- coding: utf-8 -*-\nimport torch\nfrom utils import timer\n\nfrom data import cfg\n\[email protected]\ndef point_form(boxes):\n \"\"\" Convert prior_boxes to (xmin, ymin, xmax, ymax)\n representation for comparison to point form ground truth data.\n Args:\n boxes: (tensor) center-size default boxes from priorbox layers.\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin\n boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax\n\n\[email protected]\ndef center_size(boxes):\n \"\"\" Convert prior_boxes to (cx, cy, w, h)\n representation for comparison to center-size form ground truth data.\n Args:\n boxes: (tensor) point_form boxes\n Return:\n boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes.\n \"\"\"\n return torch.cat(( (boxes[:, 2:] + boxes[:, :2])/2, # cx, cy\n boxes[:, 2:] - boxes[:, :2] ), 1) # w, h\n\[email protected]\ndef intersect(box_a, box_b):\n \"\"\" We resize both tensors to [A,B,2] without new malloc:\n [A,2] -> [A,1,2] -> [A,B,2]\n [B,2] -> [1,B,2] -> [A,B,2]\n Then we compute the area of intersect between box_a and box_b.\n Args:\n box_a: (tensor) bounding boxes, Shape: [n,A,4].\n box_b: (tensor) bounding boxes, Shape: [n,B,4].\n Return:\n (tensor) intersection area, Shape: [n,A,B].\n \"\"\"\n n = box_a.size(0)\n A = box_a.size(1)\n B = box_b.size(1)\n max_xy = torch.min(box_a[:, :, 2:].unsqueeze(2).expand(n, A, B, 2),\n box_b[:, :, 2:].unsqueeze(1).expand(n, A, B, 2))\n min_xy = torch.max(box_a[:, :, :2].unsqueeze(2).expand(n, A, B, 2),\n box_b[:, :, :2].unsqueeze(1).expand(n, A, B, 2))\n return torch.clamp(max_xy - min_xy, min=0).prod(3) # inter\n\n\ndef jaccard(box_a, box_b, iscrowd:bool=False):\n \"\"\"Compute the jaccard overlap of two sets of boxes. The jaccard overlap\n is simply the intersection over union of two boxes. Here we operate on\n ground truth boxes and default boxes. If iscrowd=True, put the crowd in box_b.\n E.g.:\n A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)\n Args:\n box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]\n box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]\n Return:\n jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]\n \"\"\"\n use_batch = True\n if box_a.dim() == 2:\n use_batch = False\n box_a = box_a[None, ...]\n box_b = box_b[None, ...]\n\n inter = intersect(box_a, box_b)\n area_a = ((box_a[:, :, 2]-box_a[:, :, 0]) *\n (box_a[:, :, 3]-box_a[:, :, 1])).unsqueeze(2).expand_as(inter) # [A,B]\n area_b = ((box_b[:, :, 2]-box_b[:, :, 0]) *\n (box_b[:, :, 3]-box_b[:, :, 1])).unsqueeze(1).expand_as(inter) # [A,B]\n union = area_a + area_b - inter\n\n out = inter / area_a if iscrowd else inter / union\n return out if use_batch else out.squeeze(0)\n\ndef elemwise_box_iou(box_a, box_b):\n \"\"\" Does the same as above but instead of pairwise, elementwise along the inner dimension. 
\"\"\"\n max_xy = torch.min(box_a[:, 2:], box_b[:, 2:])\n min_xy = torch.max(box_a[:, :2], box_b[:, :2])\n inter = torch.clamp((max_xy - min_xy), min=0)\n inter = inter[:, 0] * inter[:, 1]\n\n area_a = (box_a[:, 2] - box_a[:, 0]) * (box_a[:, 3] - box_a[:, 1])\n area_b = (box_b[:, 2] - box_b[:, 0]) * (box_b[:, 3] - box_b[:, 1])\n\n union = area_a + area_b - inter\n union = torch.clamp(union, min=0.1)\n\n # Return value is [n] for inputs [n, 4]\n return torch.clamp(inter / union, max=1)\n\ndef mask_iou(masks_a, masks_b, iscrowd=False, device='cpu'):\n \"\"\"\n Computes the pariwise mask IoU between two sets of masks of size [a, h, w] and [b, h, w].\n The output is of size [a, b].\n\n Wait I thought this was \"box_utils\", why am I putting this in here?\n \"\"\"\n\n masks_a = masks_a.view(masks_a.size(0), -1)\n masks_b = masks_b.view(masks_b.size(0), -1)\n masks_a = masks_a.to(device)\n masks_b = masks_b.to(device)\n intersection = masks_a.to(device) @ masks_b.t().to(device)\n area_a = masks_a.sum(dim=1).unsqueeze(1)\n area_b = masks_b.sum(dim=1).unsqueeze(0)\n\n return intersection / (area_a + area_b - intersection) if not iscrowd else intersection / area_a\n\ndef elemwise_mask_iou(masks_a, masks_b):\n \"\"\" Does the same as above but instead of pairwise, elementwise along the outer dimension. \"\"\"\n masks_a = masks_a.view(-1, masks_a.size(-1))\n masks_b = masks_b.view(-1, masks_b.size(-1))\n\n intersection = (masks_a * masks_b).sum(dim=0)\n area_a = masks_a.sum(dim=0)\n area_b = masks_b.sum(dim=0)\n\n # Return value is [n] for inputs [h, w, n]\n return torch.clamp(intersection / torch.clamp(area_a + area_b - intersection, min=0.1), max=1)\n\n\n\ndef change(gt, priors):\n \"\"\"\n Compute the d_change metric proposed in Box2Pix:\n https://lmb.informatik.uni-freiburg.de/Publications/2018/UB18/paper-box2pix.pdf\n \n Input should be in point form (xmin, ymin, xmax, ymax).\n\n Output is of shape [num_gt, num_priors]\n Note this returns -change so it can be a drop in replacement for \n \"\"\"\n num_priors = priors.size(0)\n num_gt = gt.size(0)\n\n gt_w = (gt[:, 2] - gt[:, 0])[:, None].expand(num_gt, num_priors)\n gt_h = (gt[:, 3] - gt[:, 1])[:, None].expand(num_gt, num_priors)\n\n gt_mat = gt[:, None, :].expand(num_gt, num_priors, 4)\n pr_mat = priors[None, :, :].expand(num_gt, num_priors, 4)\n\n diff = gt_mat - pr_mat\n diff[:, :, 0] /= gt_w\n diff[:, :, 2] /= gt_w\n diff[:, :, 1] /= gt_h\n diff[:, :, 3] /= gt_h\n\n return -torch.sqrt( (diff ** 2).sum(dim=2) )\n\n\n\n\ndef match(pos_thresh, neg_thresh, truths, priors, labels, crowd_boxes, loc_t, conf_t, idx_t, idx, loc_data):\n \"\"\"Match each prior box with the ground truth box of the highest jaccard\n overlap, encode the bounding boxes, then return the matched indices\n corresponding to both confidence and location preds.\n Args:\n pos_thresh: (float) IoU > pos_thresh ==> positive.\n neg_thresh: (float) IoU < neg_thresh ==> negative.\n truths: (tensor) Ground truth boxes, Shape: [num_obj, num_priors].\n priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4].\n labels: (tensor) All the class labels for the image, Shape: [num_obj].\n crowd_boxes: (tensor) All the crowd box annotations or None if there are none.\n loc_t: (tensor) Tensor to be filled w/ endcoded location targets.\n conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. 
Note: -1 means neutral.\n idx_t: (tensor) Tensor to be filled w/ the index of the matched gt box for each prior.\n idx: (int) current batch index.\n loc_data: (tensor) The predicted bbox regression coordinates for this batch.\n Return:\n The matched indices corresponding to 1)location and 2)confidence preds.\n \"\"\"\n decoded_priors = decode(loc_data, priors, cfg.use_yolo_regressors) if cfg.use_prediction_matching else point_form(priors)\n \n # Size [num_objects, num_priors]\n overlaps = jaccard(truths, decoded_priors) if not cfg.use_change_matching else change(truths, decoded_priors)\n\n # Size [num_priors] best ground truth for each prior\n best_truth_overlap, best_truth_idx = overlaps.max(0)\n\n # We want to ensure that each gt gets used at least once so that we don't\n # waste any training data. In order to do that, find the max overlap anchor\n # with each gt, and force that anchor to use that gt.\n for _ in range(overlaps.size(0)):\n # Find j, the gt with the highest overlap with a prior\n # In effect, this will loop through overlaps.size(0) in a \"smart\" order,\n # always choosing the highest overlap first.\n best_prior_overlap, best_prior_idx = overlaps.max(1)\n j = best_prior_overlap.max(0)[1]\n\n # Find i, the highest overlap anchor with this gt\n i = best_prior_idx[j]\n\n # Set all other overlaps with i to be -1 so that no other gt uses it\n overlaps[:, i] = -1\n # Set all other overlaps with j to be -1 so that this loop never uses j again\n overlaps[j, :] = -1\n\n # Overwrite i's score to be 2 so it doesn't get thresholded ever\n best_truth_overlap[i] = 2\n # Set the gt to be used for i to be j, overwriting whatever was there\n best_truth_idx[i] = j\n\n matches = truths[best_truth_idx] # Shape: [num_priors,4]\n conf = labels[best_truth_idx] + 1 # Shape: [num_priors]\n\n conf[best_truth_overlap < pos_thresh] = -1 # label as neutral\n conf[best_truth_overlap < neg_thresh] = 0 # label as background\n\n # Deal with crowd annotations for COCO\n if crowd_boxes is not None and cfg.crowd_iou_threshold < 1:\n # Size [num_priors, num_crowds]\n crowd_overlaps = jaccard(decoded_priors, crowd_boxes, iscrowd=True)\n # Size [num_priors]\n best_crowd_overlap, best_crowd_idx = crowd_overlaps.max(1)\n # Set non-positives with crowd iou of over the threshold to be neutral.\n conf[(conf <= 0) & (best_crowd_overlap > cfg.crowd_iou_threshold)] = -1\n\n loc = encode(matches, priors, cfg.use_yolo_regressors)\n loc_t[idx] = loc # [num_priors,4] encoded offsets to learn\n conf_t[idx] = conf # [num_priors] top class label for each prior\n idx_t[idx] = best_truth_idx # [num_priors] indices for lookup\n\[email protected]\ndef encode(matched, priors, use_yolo_regressors:bool=False):\n \"\"\"\n Encode bboxes matched with each prior into the format\n produced by the network. See decode for more details on\n this format. Note that encode(decode(x, p), p) = x.\n \n Args:\n - matched: A tensor of bboxes in point form with shape [num_priors, 4]\n - priors: The tensor of all priors with shape [num_priors, 4]\n Return: A tensor with encoded relative coordinates in the format\n outputted by the network (see decode). 
Size: [num_priors, 4]\n    \"\"\"\n\n    if use_yolo_regressors:\n        # Exactly the reverse of what we did in decode\n        # In fact encode(decode(x, p), p) should be x\n        boxes = center_size(matched)\n\n        loc = torch.cat((\n            boxes[:, :2] - priors[:, :2],\n            torch.log(boxes[:, 2:] / priors[:, 2:])\n        ), 1)\n    else:\n        variances = [0.1, 0.2]\n\n        # dist b/t match center and prior's center\n        g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2]\n        # encode variance\n        g_cxcy /= (variances[0] * priors[:, 2:])\n        # match wh / prior wh\n        g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:]\n        g_wh = torch.log(g_wh) / variances[1]\n        # return target for smooth_l1_loss\n        loc = torch.cat([g_cxcy, g_wh], 1)  # [num_priors,4]\n\n    return loc\n\n@torch.jit.script\ndef decode(loc, priors, use_yolo_regressors:bool=False):\n    \"\"\"\n    Decode predicted bbox coordinates using the same scheme\n    employed by Yolov2: https://arxiv.org/pdf/1612.08242.pdf\n\n        b_x = (sigmoid(pred_x) - .5) / conv_w + prior_x\n        b_y = (sigmoid(pred_y) - .5) / conv_h + prior_y\n        b_w = prior_w * exp(loc_w)\n        b_h = prior_h * exp(loc_h)\n\n    Note that loc is input as [(s(x)-.5)/conv_w, (s(y)-.5)/conv_h, w, h]\n    while priors are input as [x, y, w, h] where each coordinate\n    is relative to size of the image (even sigmoid(x)). We do this\n    in the network by dividing by the 'cell size', which is just\n    the size of the convouts.\n\n    Also note that prior_x and prior_y are center coordinates which\n    is why we have to subtract .5 from sigmoid(pred_x and pred_y).\n\n    Args:\n        - loc:    The predicted bounding boxes of size [num_priors, 4]\n        - priors: The priorbox coords with size [num_priors, 4]\n\n    Returns: A tensor of decoded relative coordinates in point form\n    with size [num_priors, 4]\n    \"\"\"\n\n    if use_yolo_regressors:\n        # Decoded boxes in center-size notation\n        boxes = torch.cat((\n            loc[:, :2] + priors[:, :2],\n            priors[:, 2:] * torch.exp(loc[:, 2:])\n        ), 1)\n\n        boxes = point_form(boxes)\n    else:\n        variances = [0.1, 0.2]\n\n        boxes = torch.cat((\n            priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],\n            priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)\n        boxes[:, :2] -= boxes[:, 2:] / 2\n        boxes[:, 2:] += boxes[:, :2]\n\n    return boxes\n\n\ndef log_sum_exp(x):\n    \"\"\"Utility function for computing log_sum_exp.\n    This will be used to determine unaveraged confidence loss across\n    all examples in a batch.\n    Args:\n        x (Variable(tensor)): conf_preds from conf layers\n    \"\"\"\n    x_max = x.data.max()\n    return torch.log(torch.sum(torch.exp(x-x_max), 1)) + x_max\n\n\n@torch.jit.script\ndef sanitize_coordinates(_x1, _x2, img_size:int, padding:int=0, cast:bool=True):\n    \"\"\"\n    Sanitizes the input coordinates so that x1 < x2, x1 != x2, x1 >= 0, and x2 <= image_size.\n    Also converts from relative to absolute coordinates and casts the results to long tensors.\n\n    If cast is false, the result won't be cast to longs.\n    Warning: this does things in-place behind the scenes so copy if necessary.\n    \"\"\"\n    _x1 = _x1 * img_size\n    _x2 = _x2 * img_size\n    if cast:\n        _x1 = _x1.long()\n        _x2 = _x2.long()\n    x1 = torch.min(_x1, _x2)\n    x2 = torch.max(_x1, _x2)\n    x1 = torch.clamp(x1-padding, min=0)\n    x2 = torch.clamp(x2+padding, max=img_size)\n\n    return x1, x2\n\n\n@torch.jit.script\ndef crop(masks, boxes, padding:int=1):\n    \"\"\"\n    \"Crop\" predicted masks by zeroing out everything not in the predicted bbox.\n    Vectorized by Chong (thanks Chong).\n\n    Args:\n        - masks should be a size [h, w, n] tensor of masks\n        - boxes should be a size [n, 4] tensor
of bbox coords in relative point form\n \"\"\"\n h, w, n = masks.size()\n x1, x2 = sanitize_coordinates(boxes[:, 0], boxes[:, 2], w, padding, cast=False)\n y1, y2 = sanitize_coordinates(boxes[:, 1], boxes[:, 3], h, padding, cast=False)\n\n rows = torch.arange(w, device=masks.device, dtype=x1.dtype).view(1, -1, 1).expand(h, w, n)\n cols = torch.arange(h, device=masks.device, dtype=x1.dtype).view(-1, 1, 1).expand(h, w, n)\n \n masks_left = rows >= x1.view(1, 1, -1)\n masks_right = rows < x2.view(1, 1, -1)\n masks_up = cols >= y1.view(1, 1, -1)\n masks_down = cols < y2.view(1, 1, -1)\n \n crop_mask = masks_left * masks_right * masks_up * masks_down\n \n return masks * crop_mask.float()\n\n\ndef index2d(src, idx):\n \"\"\"\n Indexes a tensor by a 2d index.\n\n In effect, this does\n out[i, j] = src[i, idx[i, j]]\n \n Both src and idx should have the same size.\n \"\"\"\n\n offs = torch.arange(idx.size(0), device=idx.device)[:, None].expand_as(idx)\n idx = idx + offs * idx.size(1)\n\n return src.view(-1)[idx.view(-1)].view(idx.size())\n"
] | [
[
"torch.max",
"torch.cat",
"torch.min",
"torch.exp",
"torch.log",
"torch.arange",
"torch.clamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dimitrymindlin/DenseNetMuraPytorch | [
"ef3a872d739b015e3618c00265acb481dc251342"
] | [
"pipeline.py"
] | [
"import os\nimport pandas as pd\nfrom tqdm import tqdm\nimport torch\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader, Dataset\nfrom torchvision.datasets.folder import pil_loader\n\nfrom configs.mura_config import mura_config\n\ndata_cat = ['train', 'valid'] # data categories\n\ndef get_study_level_data(study_types):\n \"\"\"\n Returns a dict, with keys 'train' and 'valid' and respective values as study level dataframes, \n these dataframes contain three columns 'Path', 'Count', 'Label'\n Args:\n study_type (list): one or many of the seven study type folder names in 'train/valid/test' dataset\n \"\"\"\n study_data = {}\n study_label = {mura_config['data']['class_names'][1]: 1, mura_config['data']['class_names'][0]: 0}\n for phase in data_cat:\n for study_type in study_types:\n BASE_DIR = 'mura/%s/%s/' % (phase, study_type)\n patients = list(os.walk(BASE_DIR))[0][1] # list of patient folder names\n study_data[phase] = pd.DataFrame(columns=['Path', 'Count', 'Label'])\n i = 0\n for patient in tqdm(patients): # for each patient folder\n for study in os.listdir(BASE_DIR + patient): # for each study in that patient folder\n label = study_label[study.split('_')[1]] # get label 0 or 1\n path = BASE_DIR + patient + '/' + study + '/' # path to this study\n study_data[phase].loc[i] = [path, len(os.listdir(path)), label] # add new row\n i+=1\n return study_data\n\nclass ImageDataset(Dataset):\n \"\"\"training dataset.\"\"\"\n\n def __init__(self, df, transform=None):\n \"\"\"\n Args:\n df (pd.DataFrame): a pandas DataFrame with image path and labels.\n transform (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n self.df = df\n self.transform = transform\n\n def __len__(self):\n return len(self.df)\n\n def __getitem__(self, idx):\n study_path = self.df.iloc[idx, 0]\n count = self.df.iloc[idx, 1]\n images = []\n for i in range(count):\n image = pil_loader(study_path + 'image%s.png' % (i+1))\n images.append(self.transform(image))\n images = torch.stack(images)\n label = self.df.iloc[idx, 2]\n sample = {'images': images, 'label': label}\n return sample\n\ndef get_dataloaders(data, batch_size=8, study_level=False):\n '''\n Returns dataloader pipeline with data augmentation\n '''\n if mura_config['train']['augmentation']:\n data_transforms = {\n 'train': transforms.Compose([\n transforms.Resize((mura_config['data']['image_height'], mura_config['data']['image_width'])),\n transforms.RandomHorizontalFlip(),\n transforms.RandomRotation(10),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n else:\n data_transforms = {\n 'train': transforms.Compose([\n transforms.Resize((mura_config['data']['image_height'], mura_config['data']['image_width'])),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n 'valid': transforms.Compose([\n transforms.Resize((224, 224)),\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n ]),\n }\n image_datasets = {x: ImageDataset(data[x], transform=data_transforms[x]) for x in data_cat}\n dataloaders = {x: DataLoader(image_datasets[x], batch_size=batch_size, shuffle=True, num_workers=4) for x in data_cat}\n return dataloaders\n\nif __name__=='main':\n pass\n"
] | [
[
"torch.stack",
"torch.utils.data.DataLoader",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
ruidongjr/Aldi | [
"0d2dad1ab180abb59bee15d9e5e851e4de4e8cd5"
] | [
"libraries/deep_sort/deep_sort/deep/feature_extractor.py"
] | [
"import torch\nimport torchvision.transforms as transforms\nimport numpy as np\nimport cv2\nimport logging\n\nfrom .model import Net\n\nclass Extractor(object):\n def __init__(self, model_path, use_cuda=True):\n self.net = Net(reid=True)\n self.device = \"cuda\" if torch.cuda.is_available() and use_cuda else \"cpu\"\n state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)['net_dict']\n self.net.load_state_dict(state_dict)\n logger = logging.getLogger(\"root.tracker\")\n logger.info(\"Loading weights from {}... Done!\".format(model_path))\n self.net.to(self.device)\n self.size = (64, 128)\n self.norm = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),\n ])\n \n\n\n def _preprocess(self, im_crops):\n \"\"\"\n TODO:\n 1. to float with scale from 0 to 1\n 2. resize to (64, 128) as Market1501 dataset did\n 3. concatenate to a numpy array\n 3. to torch Tensor\n 4. normalize\n \"\"\"\n def _resize(im, size):\n return cv2.resize(im.astype(np.float32)/255., size)\n\n im_batch = torch.cat([self.norm(_resize(im, self.size)).unsqueeze(0) for im in im_crops], dim=0).float()\n return im_batch\n\n\n def __call__(self, im_crops):\n im_batch = self._preprocess(im_crops)\n with torch.no_grad():\n im_batch = im_batch.to(self.device)\n features = self.net(im_batch)\n return features.cpu().numpy()\n\n\nif __name__ == '__main__':\n img = cv2.imread(\"demo.jpg\")[:,:,(2,1,0)]\n extr = Extractor(\"checkpoint/ckpt.t7\")\n feature = extr(img)\n"
] | [
[
"torch.no_grad",
"torch.cuda.is_available",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aalbersk/DeepRec | [
"f673a950780959b44dcda99398880a1d883ab338",
"f673a950780959b44dcda99398880a1d883ab338",
"f673a950780959b44dcda99398880a1d883ab338",
"f673a950780959b44dcda99398880a1d883ab338"
] | [
"sparse_operation_kit/unit_test/test_scripts/tf1/test_sparse_emb_demo.py",
"tensorflow/python/ops/prefetch.py",
"modelzoo/WDL/train.py",
"modelzoo/SOK/DLRM/model/strategy_wrapper.py"
] | [
"\"\"\"\n Copyright (c) 2021, NVIDIA CORPORATION.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport argparse\nimport sys, os\nsys.path.append(os.path.abspath(os.path.join(\n os.path.dirname(os.path.abspath(__file__)), \"../../../\")))\n# os.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"2\"\nimport sparse_operation_kit as sok\nimport tensorflow as tf\nimport utils\nfrom sparse_models import SOKDemo, TFDemo\nfrom test_dense_emb_demo import check_saved_embedding_variables\nimport strategy_wrapper\nimport numpy as np\n\n\ndef get_sok_results(args, init_tensors, *random_samples):\n if args.distributed_tool == \"onedevice\":\n strategy = strategy_wrapper.OneDeviceStrategy()\n elif args.distributed_tool == \"horovod\":\n import horovod.tensorflow as hvd\n hvd.init()\n strategy = strategy_wrapper.HorovodStrategy()\n else:\n raise ValueError(f\"{args.distributed_tool} is not supported.\")\n\n with strategy.scope():\n sok_init_op = sok.Init(global_batch_size=args.global_batch_size)\n\n sok_sparse_demo = SOKDemo(max_vocabulary_size_per_gpu=args.max_vocabulary_size_per_gpu,\n embedding_vec_size=args.embedding_vec_size,\n combiner=args.combiner,\n slot_num=args.slot_num,\n max_nnz=args.max_nnz,\n use_hashtable=args.use_hashtable,\n num_of_dense_layers=0)\n \n emb_opt = utils.get_embedding_optimizer(args.optimizer)(learning_rate=0.1)\n dense_opt = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)\n\n sok_saver = sok.Saver()\n restore_op = list()\n for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):\n control_inputs = [restore_op[-1]] if restore_op else None\n with tf.control_dependencies(control_inputs):\n if args.restore_params:\n filepath = r\"./embedding_variables\"\n op = sok_saver.restore_from_file(embedding_layer.embedding_variable, filepath)\n else:\n op = sok_saver.load_embedding_values(embedding_layer.embedding_variable, init_tensors[i])\n restore_op.append(op)\n\n loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True, reduction=\"none\")\n def _replica_loss(labels, logits):\n loss = loss_fn(labels, logits)\n return tf.nn.compute_average_loss(loss, global_batch_size=args.global_batch_size)\n\n def _train_step(inputs, labels, training):\n def _step_fn(inputs, labels):\n logit, embedding_vector = sok_sparse_demo(inputs, training=training)\n loss = _replica_loss(labels, logit)\n emb_var, other_var = sok.split_embedding_variable_from_others(sok_sparse_demo.trainable_variables)\n grads = tf.gradients(loss, emb_var + other_var, colocate_gradients_with_ops=True,\n unconnected_gradients=tf.UnconnectedGradients.NONE)\n emb_grads, other_grads = grads[:len(emb_var)], grads[len(emb_var):]\n if \"plugin\" in args.optimizer:\n emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))\n else:\n with sok.OptimizerScope(emb_var):\n emb_train_op = emb_opt.apply_gradients(zip(emb_grads, emb_var))\n with tf.control_dependencies([*emb_grads]):\n # in case NCCL runs concurrently via SOK and horovod\n other_grads = strategy.reduce(\"sum\", other_grads)\n other_train_op = 
dense_opt.apply_gradients(zip(other_grads, other_var))\n\n with tf.control_dependencies([emb_train_op, other_train_op]):\n total_loss = strategy.reduce(\"sum\", loss)\n total_loss = tf.identity(total_loss)\n return total_loss, embedding_vector\n return strategy.run(_step_fn, inputs, labels)\n\n replica_batch_size = args.global_batch_size // args.gpu_num\n dataset = utils.tf_dataset(*random_samples, batchsize=replica_batch_size,\n to_sparse_tensor=True, repeat=1)\n train_iterator = dataset.make_initializable_iterator()\n iterator_init = train_iterator.initializer\n\n inputs, labels = train_iterator.get_next()\n graph_results = _train_step(inputs, labels, training=True)\n \n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n if \"plugin\" in args.optimizer:\n init_op = tf.group(init_op, emb_opt.initializer)\n\n save_op = list()\n for i, embedding_layer in enumerate(sok_sparse_demo.embedding_layers):\n control_inputs = [save_op[-1]] if save_op else None\n with tf.control_dependencies(control_inputs):\n if args.save_params:\n filepath = r\"./embedding_variables/\"\n utils.try_make_dirs(filepath)\n op = sok_saver.dump_to_file(embedding_layer.embedding_variable, filepath)\n else:\n op = tf.constant(1.0)\n save_op.append(op)\n\n sok_results = list()\n\n with tf.Session() as sess:\n sess.run(sok_init_op)\n sess.run([init_op, iterator_init])\n sess.run(restore_op)\n sess.graph.finalize()\n\n for step in range(args.iter_num):\n loss_v, emb_vector_v = sess.run([*graph_results])\n print(\"*\" * 80)\n print(f\"Step: {step}, loss: {loss_v}, embedding_vector:\\n{emb_vector_v}\")\n sok_results.append(emb_vector_v)\n\n sess.run(save_op)\n\n name = list()\n for embedding_layer in sok_sparse_demo.embedding_layers:\n name.append(embedding_layer.embedding_variable.m_var_name)\n \n return sok_results, name\n\ndef get_tf_results(args, init_tensors, *random_samples):\n graph = tf.Graph()\n with graph.as_default():\n tf_sparse_demo = TFDemo(vocabulary_size=args.max_vocabulary_size_per_gpu * args.gpu_num,\n embedding_vec_size=args.embedding_vec_size,\n combiner=args.combiner,\n slot_num=args.slot_num,\n max_nnz=args.max_nnz,\n use_hashtable=args.use_hashtable,\n num_of_dense_layers=0)\n \n optimizer = utils.get_dense_optimizer(args.optimizer)(learning_rate=0.1)\n\n loss_fn = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n def _train_step(inputs, labels, training):\n logit, embedding_vector = tf_sparse_demo(inputs, training=training)\n loss = loss_fn(labels, logit)\n grads = tf.gradients(loss, tf_sparse_demo.trainable_variables,\n colocate_gradients_with_ops=True,\n unconnected_gradients=tf.UnconnectedGradients.NONE)\n train_op = optimizer.apply_gradients(zip(grads, tf_sparse_demo.trainable_variables))\n with tf.control_dependencies([train_op]):\n loss = tf.identity(loss)\n return loss, embedding_vector\n\n\n dataset = utils.tf_dataset(*random_samples, batchsize=args.global_batch_size,\n to_sparse_tensor=True, repeat=1)\n train_iterator = dataset.make_initializable_iterator()\n iterator_init = train_iterator.initializer\n\n inputs, labels = train_iterator.get_next()\n graph_results = _train_step(inputs, labels, training=True)\n\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n\n restore_op = list()\n for i, embedding_weight in enumerate(tf_sparse_demo.embedding_weights):\n restore_op.append(embedding_weight.assign(tf.concat(init_tensors[i], axis=0)))\n\n emb_values = list()\n for embedding_weight in 
tf_sparse_demo.embedding_weights:\n if args.save_params:\n filepath = r\"./embedding_variables/\"\n utils.try_make_dirs(filepath)\n emb_values.append(embedding_weight.read_value())\n else:\n emb_values = tf.constant(1.0)\n\n tf_results = list()\n with tf.Session(graph=graph) as sess:\n sess.run([init_op, iterator_init])\n sess.run(restore_op)\n sess.graph.finalize()\n\n for step in range(args.iter_num):\n loss_v, emb_vector_v = sess.run([*graph_results])\n print(\"*\" * 80)\n print(f\"step: {step}, loss: {loss_v}, embedding_vector:\\n{emb_vector_v}\")\n tf_results.append(emb_vector_v)\n\n emb_values_v = sess.run(emb_values)\n if args.save_params:\n for i, value in enumerate(emb_values_v):\n utils.save_to_file(os.path.join(filepath, r\"tf_variable_\" + str(i) + r\".file\"),\n value)\n \n name = list()\n for embedding_weight in tf_sparse_demo.embedding_weights:\n name.append(embedding_weight.name)\n\n return tf_results, name\n\ndef compare_sparse_emb_sok_with_tf(args):\n if args.global_batch_size % args.gpu_num != 0:\n raise ValueError(f\"global_batch_size: {args.global_batch_size} is not divisible \"\n f\"by gpu_num: {args.gpu_num}\")\n\n if args.use_hashtable:\n vocabulary_size = args.max_vocabulary_size_per_gpu * args.gpu_num\n else:\n vocabulary_size = args.max_vocabulary_size_per_gpu\n\n if args.generate_new_datas:\n replica_batch_size = args.global_batch_size // args.gpu_num\n random_samples = utils.generate_random_samples(num_of_samples=replica_batch_size * args.iter_num,\n vocabulary_size=vocabulary_size,\n slot_num=sum(args.slot_num),\n max_nnz=args.max_nnz,\n use_sparse_mask=True)\n utils.save_to_file(r\"./random_samples_\" + str(args.rank_idx) + r\".file\", *random_samples)\n else:\n random_samples = utils.restore_from_file(r\"./random_samples_\" + str(args.rank_idx) + r\".file\")\n\n if args.restore_params:\n filepath = r\"./embedding_variables\"\n # because we already checked the variable consistency when saving\n # so that we can directly use TF Variable file to initialize\n # TF's Variable and SOK's Variable\n init_tensors = list()\n for i in range(len(args.slot_num)):\n tf_values_filename = os.path.join(filepath, r\"tf_variable_\" + str(i) + r\".file\")\n init_tensors.append(utils.restore_from_file(tf_values_filename))\n else:\n init_tensors = list()\n for i in range(len(args.slot_num)):\n init_tensors.append(utils.get_ones_tensor(max_vocab_size_per_gpu=args.max_vocabulary_size_per_gpu,\n embedding_vec_size=args.embedding_vec_size[i],\n num=args.gpu_num))\n sok_results, variable_names = get_sok_results(args, init_tensors, *random_samples)\n utils.save_to_file(r\"./sok_embedding_vectors_\" + str(args.rank_idx) + r\".file\", *sok_results)\n\n if args.rank_idx != 0:\n return\n\n # aggregate dataset from different worker\n dataset_filenames = [r\"./random_samples_\" + str(rank_idx) + r\".file\"\n for rank_idx in range(args.rank_size)]\n random_samples_total = [list() for _ in range(args.iter_num)]\n random_labels_total = [list() for _ in range(args.iter_num)]\n local_batch_size = args.global_batch_size // args.gpu_num\n for rank_idx in range(args.rank_size):\n samples, labels = utils.restore_from_file(dataset_filenames[rank_idx])\n for i in range(args.iter_num):\n random_samples_total[i].extend(samples[i * local_batch_size : (i + 1) * local_batch_size])\n random_labels_total[i].extend(labels[i * local_batch_size : (i + 1) * local_batch_size])\n random_samples_total = np.concatenate(random_samples_total, axis=0)\n random_labels_total = np.concatenate(random_labels_total, 
axis=0)\n\n tf_results, _ = get_tf_results(args, init_tensors, random_samples_total, random_labels_total)\n\n # aggregate sok forward results from different worker\n sok_results_filenames = [r\"./sok_embedding_vectors_\" + str(rank_idx) + r\".file\"\n for rank_idx in range(args.rank_size)]\n sok_results_total = list()\n for filename in sok_results_filenames:\n sok_results = utils.restore_from_file(filename)\n sok_results_total.append(sok_results)\n\n if len(sok_results_total[0]) != len(tf_results):\n raise ValueError(\"The length of sok results is not equal to that of tensorflow.\")\n if len(sok_results) != args.iter_num:\n raise ValueError(\"The length of embedding vectors: %d is not equal to iteration number: %d.\"\n %(len(sok_results), args.iter_num))\n\n rtol, atol = 1e-3, 1e-3\n if args.restore_params:\n rtol, atol = rtol * 10, atol * 10\n if args.distributed_tool == \"horovod\":\n rtol, atol = rtol * 10, atol * 10\n for i in range(args.iter_num):\n sok_vector = np.concatenate([sok_results_total[rank_idx][i]\n for rank_idx in range(args.rank_size)], axis=0)\n allclose = np.allclose(sok_vector, tf_results[i], rtol=rtol, atol=atol)\n if not allclose:\n raise ValueError(f\"\\n{sok_vector} \\nis not near to \\n{tf_results[i]} \\nat rtol={rtol}, atol={atol}\")\n\n print(f\"\\n[INFO]: For {len(args.slot_num)} Sparse Embedding layer, using {args.gpu_num} GPUs + {args.optimizer} optimizer, \"\n f\"using hashtable? {args.use_hashtable}, combiner = {args.combiner}, the embedding vectors\"\n f\" obtained from sok and tf are consistent for {args.iter_num} iterations.\")\n\n if args.save_params:\n check_saved_embedding_variables(args, variable_names,\n use_hashtable=args.use_hashtable, gpu_num=args.gpu_num)\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--gpu_num\", type=int, required=False, default=1)\n parser.add_argument(\"--distributed_tool\", type=str, required=False, \n choices=[\"horovod\", \"onedevice\"], default=\"onedevice\")\n parser.add_argument(\"--iter_num\", type=int, required=False, default=50)\n parser.add_argument(\"--max_vocabulary_size_per_gpu\", type=int,\n required=False, default=1024)\n parser.add_argument(\"--combiner\", type=str, required=False, default=\"sum\",\n choices=[\"sum\", \"mean\"])\n parser.add_argument(\"--slot_num\", type=int, nargs=\"+\",\n help=\"the number of feature fileds\",\n required=False, default=1)\n parser.add_argument(\"--max_nnz\", type=int,\n help=\"the maximum of valid inputs\",\n required=False, default=1)\n parser.add_argument(\"--embedding_vec_size\", type=int, nargs=\"+\",\n required=False, default=1)\n parser.add_argument(\"--global_batch_size\", type=int, required=False,\n default=16)\n parser.add_argument(\"--optimizer\", type=str, required=False, \n default=\"adam\", choices=[\"plugin_adam\", \"adam\", \"sgd\", \"compat_adam\"])\n parser.add_argument(\"--generate_new_datas\", type=int, choices=[0, 1],\n required=False, default=1)\n parser.add_argument(\"--save_params\", type=int, choices=[0, 1],\n required=False, default=1)\n parser.add_argument(\"--restore_params\", type=int, choices=[0, 1],\n required=False, default=0)\n parser.add_argument(\"--use_hashtable\", type=int, choices=[0, 1],\n required=False, default=1)\n\n args = parser.parse_args()\n\n args.generate_new_datas = True if args.generate_new_datas == 1 else False\n args.save_params = True if args.save_params == 1 else False\n args.restore_params = True if args.restore_params == 1 else False\n args.use_hashtable = True if 
args.use_hashtable == 1 else False\n\n if (args.distributed_tool == \"onedevice\" and args.gpu_num != 1):\n raise ValueError(f\"When 'onedevice' is used as the distributed_tool, \"\n f\"gpu_num must be 1, but got {args.gpu_num}\")\n\n if args.distributed_tool == \"onedevice\":\n available_gpus = \",\".join(map(str, range(args.gpu_num)))\n rank_size = args.gpu_num\n rank_idx = 0\n else:\n # gpu_num will be ignored.\n rank_size = os.getenv(\"OMPI_COMM_WORLD_SIZE\")\n if rank_size is None:\n raise ValueError(f\"When distributed_tool is set to {args.distributed_tool}, \"\n \"mpiexec / mpirun must be used to launch this program.\")\n rank_size = int(rank_size)\n rank_idx = int(os.getenv(\"OMPI_COMM_WORLD_RANK\"))\n\n available_gpus = str(rank_idx)\n\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = available_gpus\n\n args.rank_size = rank_size\n args.rank_idx = rank_idx\n args.gpu_num = rank_size\n\n compare_sparse_emb_sok_with_tf(args)",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Prefetching samples asynchronously.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nfrom tensorflow.python.framework import sparse_tensor\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.util import nest\nfrom tensorflow.python.util.tf_export import tf_export\n\n\nfrom tensorflow.python.ops import gen_tensor_buffer_ops\nfrom tensorflow.python.ops.prefetch_runner import PrefetchRunner\n\nops.NotDifferentiable('TensorBufferPut')\nops.NotDifferentiable('TensorBufferTake')\nops.NotDifferentiable('TensorBufferCancel')\n\nPREFETCH = \"prefetch\"\n\n@tf_export(v1=[\"make_prefetch_hook\"])\ndef make_prefetch_hook(daemon=True, start=True):\n \"\"\"Create PrefetchRunner.Hook for prefetching.\n\n Args:\n daemon: (Optional.) Whether the threads should be marked as `daemons`,\n meaning they don't block program exit.\n start: (Optional.) If `False` threads would not be started.\n\n Returns:\n A PrefetchRunner.Hook for prefetching.\n \"\"\"\n return PrefetchRunner.Hook(PREFETCH, daemon=daemon, start=start)\n\n@tf_export(v1=[\"staged\"])\ndef staged(\n features,\n feed_list=None,\n feed_generator=None,\n capacity=1,\n num_threads=1,\n num_clients=1,\n timeout_millis=300000,\n closed_exception_types=None,\n ignored_exception_types=None,\n name=None):\n \"\"\"Prefetch samples.\n\n Args:\n features: Nest structure of tensors to prefetch.\n feed_list: (Optional.) A list of `feed_dict` keys. See\n @{tf.Session.run} for details of the allowable feed key types.\n feed_generator: (Optional.) A generator function lambda sess: iterator\n that yields a list of `feed_dict` values.\n capacity: (Optional.) Max number of samples to keep in the buffer.\n num_threads: (Optional.) Number of threads for prefetching. 1 by\n default.\n num_clients: (Optional.) Number of clients of prefetched sample. 1 by\n default.\n timeout_millis: (Optional.) Max milliseconds put op can take, 5 min by\n default.\n closed_exception_types: (Optional.) Exception types indicating that the\n prefetching is normally finished. Defaults to\n `(tf.errors.OutOfRangeError, StopIteration)`.\n ignored_exception_types: (Optional.) Exception types indicating that the\n prefetching can continue. Defaults to `()`.\n name: (Optional.) 
Name of prefetching operations.\n\n Returns:\n Prefetched sample.\n \"\"\"\n if num_threads < 1:\n raise ValueError('num_threads must >= 1')\n\n if name is None:\n name = ops.get_default_graph().unique_name(PREFETCH)\n with ops.name_scope(name):\n local_device = control_flow_ops.no_op().device\n tensor_or_sparse_tensor_or_nones = nest.flatten(features)\n\n tensor_or_nones = []\n for t in tensor_or_sparse_tensor_or_nones:\n if hasattr(t, 'dense_shape'):\n tensor_or_nones.extend([t.values, t.indices, t.dense_shape])\n else:\n tensor_or_nones.append(t)\n\n tensor_indices = []\n tensors = []\n for i, v in enumerate(tensor_or_nones):\n if v is not None:\n tensor_indices.append(i)\n tensors.append(v)\n tensor_dtypes = []\n tensor_shapes = []\n for v in tensors:\n tensor_dtypes.append(v.dtype)\n tensor_shapes.append(v.shape if hasattr(v, 'shape') else None)\n\n with ops.name_scope(name):\n with ops.device(local_device):\n fetch_tensors = gen_tensor_buffer_ops.tensor_buffer_put(\n tensors,\n timeout_millis=timeout_millis,\n shared_name=name,\n shared_capacity=capacity)\n cancel_fetching = gen_tensor_buffer_ops.tensor_buffer_cancel(\n shared_name=name,\n shared_capacity=capacity)\n resume_fetching = gen_tensor_buffer_ops.tensor_buffer_cancel(\n is_cancelled=False,\n shared_name=name,\n shared_capacity=capacity)\n close_fetching = gen_tensor_buffer_ops.tensor_buffer_close(\n shared_name=name,\n shared_capacity=capacity)\n next_tensors = gen_tensor_buffer_ops.tensor_buffer_take(\n dtypes=tensor_dtypes,\n shared_name=name,\n shared_capacity=capacity,\n shared_threads=num_clients)\n if not isinstance(next_tensors, (tuple, list)):\n next_tensors = [next_tensors]\n next_tensors = [array_ops.identity(t) for t in next_tensors]\n for i, t in enumerate(next_tensors):\n t.set_shape(tensor_shapes[i])\n next_tensor_or_nones = [None] * len(tensor_or_nones)\n for i, v in enumerate(next_tensors):\n next_tensor_or_nones[tensor_indices[i]] = v\n next_tensor_or_nones = collections.deque(next_tensor_or_nones)\n next_tensor_or_sparse_tensor_or_nones = []\n for t in tensor_or_sparse_tensor_or_nones:\n if hasattr(t, 'dense_shape'):\n sparse_values = next_tensor_or_nones.popleft()\n sparse_indices = next_tensor_or_nones.popleft()\n sparse_dense_shape = next_tensor_or_nones.popleft()\n next_tensor_or_sparse_tensor_or_nones.append(\n sparse_tensor.SparseTensor(\n values=sparse_values,\n indices=sparse_indices,\n dense_shape=sparse_dense_shape))\n else:\n next_tensor_or_sparse_tensor_or_nones.append(\n next_tensor_or_nones.popleft())\n prefetched = nest.pack_sequence_as(\n features, next_tensor_or_sparse_tensor_or_nones)\n runner = PrefetchRunner(\n fetch_ops=[fetch_tensors] * num_threads,\n cancel_op=cancel_fetching,\n resume_op=resume_fetching,\n close_op=close_fetching,\n feed_list=feed_list,\n feed_generator=feed_generator,\n closed_exception_types=closed_exception_types,\n ignored_exception_types=ignored_exception_types)\n ops.add_to_collection(PREFETCH, runner)\n return prefetched\n\n@tf_export(v1=[\"prefetch_join\"])\ndef prefetch_join(\n thread_to_features,\n feed_list=None,\n feed_generator=None,\n capacity=1,\n num_clients=1,\n timeout_millis=300000,\n closed_exception_types=None,\n ignored_exception_types=None,\n name=None):\n \"\"\"Prefetch samples from thread_to_features list.\n\n `Unlike `prefetch`, `prefetch_join` runs different ops in different threads.\n `prefetch_join` can be used to support datasets with many sources.\n\n Args:\n thread_to_features: List of nest structure of tensors for each thread.\n 
feed_list: (Optional.) A list of `feed_dict` keys. See\n @{tf.Session.run} for details of the allowable feed key types.\n feed_generator: (Optional.) A generator function lambda sess: iterator\n that yields a list of `feed_dict` values.\n capacity: (Optional.) Max number of samples to keep in the buffer.\n num_clients: (Optional.) Number of clients of prefetched sample. 1 by\n default.\n timeout_millis: (Optional.) Max milliseconds put op can take, 5 min by\n default.\n closed_exception_types: (Optional.) Exception types indicating that the\n prefetching is normally finished. Defaults to\n `(tf.errors.OutOfRangeError, StopIteration)`.\n ignored_exception_types: (Optional.) Exception types indicating that the\n prefetching can continue. Defaults to `()`.\n name: (Optional.) Name of prefetching operations.\n\n Returns:\n Prefetched sample.\n \"\"\"\n if len(thread_to_features) < 1:\n raise ValueError('thread_to_features must has at least one element')\n\n if name is None:\n name = ops.get_default_graph().unique_name(PREFETCH)\n with ops.name_scope(name):\n local_device = control_flow_ops.no_op().device\n with ops.device(local_device):\n cancel_fetching = gen_tensor_buffer_ops.tensor_buffer_cancel(\n shared_name=name,\n shared_capacity=capacity)\n resume_fetching = gen_tensor_buffer_ops.tensor_buffer_cancel(\n is_cancelled=False,\n shared_name=name,\n shared_capacity=capacity)\n close_fetching = gen_tensor_buffer_ops.tensor_buffer_close(\n shared_name=name,\n shared_capacity=capacity)\n\n thread_to_tensor_dtypes = []\n thread_to_tensor_shapes = []\n thread_to_tensor_or_sparse_tensor_or_nones = []\n thread_to_tensor_or_nones = []\n thread_to_fetch_tensors = []\n for features in thread_to_features:\n tensor_or_sparse_tensor_or_nones = nest.flatten(features)\n thread_to_tensor_or_sparse_tensor_or_nones.append(\n tensor_or_sparse_tensor_or_nones)\n\n tensor_or_nones = []\n for t in tensor_or_sparse_tensor_or_nones:\n if hasattr(t, 'dense_shape'):\n tensor_or_nones.extend([t.values, t.indices, t.dense_shape])\n else:\n tensor_or_nones.append(t)\n thread_to_tensor_or_nones.append(tensor_or_nones)\n\n tensor_indices = []\n tensors = []\n for i, v in enumerate(tensor_or_nones):\n if v is not None:\n tensor_indices.append(i)\n tensors.append(v)\n tensor_dtypes = []\n tensor_shapes = []\n for v in tensors:\n tensor_dtypes.append(v.dtype)\n tensor_shapes.append(v.shape if hasattr(v, 'shape') else None)\n thread_to_tensor_dtypes.append(tensor_dtypes)\n thread_to_tensor_shapes.append(tensor_shapes)\n\n with ops.name_scope(name):\n with ops.device(local_device):\n fetch_tensors = gen_tensor_buffer_ops.tensor_buffer_put(\n tensors,\n timeout_millis=timeout_millis,\n shared_name=name,\n shared_capacity=capacity)\n thread_to_fetch_tensors.append(fetch_tensors)\n\n with ops.name_scope(name):\n with ops.device(local_device):\n next_tensors = gen_tensor_buffer_ops.tensor_buffer_take(\n dtypes=thread_to_tensor_dtypes[0],\n shared_name=name,\n shared_capacity=capacity,\n shared_threads=num_clients)\n if not isinstance(next_tensors, (tuple, list)):\n next_tensors = [next_tensors]\n next_tensors = [array_ops.identity(t) for t in next_tensors]\n for i, t in enumerate(next_tensors):\n t.set_shape(thread_to_tensor_shapes[0][i])\n next_tensor_or_nones = [None for _ in thread_to_tensor_or_nones[0]]\n for i, v in enumerate(next_tensors):\n next_tensor_or_nones[tensor_indices[i]] = v\n next_tensor_or_nones = collections.deque(next_tensor_or_nones)\n next_tensor_or_sparse_tensor_or_nones = []\n for t in 
thread_to_tensor_or_sparse_tensor_or_nones[0]:\n if hasattr(t, 'dense_shape'):\n sparse_values = next_tensor_or_nones.popleft()\n sparse_indices = next_tensor_or_nones.popleft()\n sparse_dense_shape = next_tensor_or_nones.popleft()\n next_tensor_or_sparse_tensor_or_nones.append(\n sparse_tensor.SparseTensor(\n values=sparse_values,\n indices=sparse_indices,\n dense_shape=sparse_dense_shape))\n else:\n next_tensor_or_sparse_tensor_or_nones.append(\n next_tensor_or_nones.popleft())\n prefetched = nest.pack_sequence_as(\n thread_to_features[0], next_tensor_or_sparse_tensor_or_nones)\n\n runner = PrefetchRunner(\n fetch_ops=thread_to_fetch_tensors,\n cancel_op=cancel_fetching,\n resume_op=resume_fetching,\n close_op=close_fetching,\n feed_list=feed_list,\n feed_generator=feed_generator,\n closed_exception_types=closed_exception_types,\n ignored_exception_types=ignored_exception_types)\n ops.add_to_collection(PREFETCH, runner)\n return prefetched\n",
"# Copyright (c) 2022 Intel Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport time\nimport argparse\nimport tensorflow as tf\nimport os\nimport sys\nimport math\nimport collections\nfrom tensorflow.python.client import timeline\nimport json\n\nfrom tensorflow.python.ops import partitioned_variables\n\n# Set to INFO for tracking training, default is WARN. ERROR for least messages\ntf.logging.set_verbosity(tf.logging.INFO)\nprint(\"Using TensorFlow version %s\" % (tf.__version__))\n\n# Definition of some constants\nCONTINUOUS_COLUMNS = ['I' + str(i) for i in range(1, 14)] # 1-13 inclusive\nCATEGORICAL_COLUMNS = ['C' + str(i) for i in range(1, 27)] # 1-26 inclusive\nLABEL_COLUMN = ['clicked']\nTRAIN_DATA_COLUMNS = LABEL_COLUMN + CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS\nFEATURE_COLUMNS = CONTINUOUS_COLUMNS + CATEGORICAL_COLUMNS\nHASH_BUCKET_SIZES = {\n 'C1': 2500,\n 'C2': 2000,\n 'C3': 300000,\n 'C4': 250000,\n 'C5': 1000,\n 'C6': 100,\n 'C7': 20000,\n 'C8': 4000,\n 'C9': 20,\n 'C10': 100000,\n 'C11': 10000,\n 'C12': 250000,\n 'C13': 40000,\n 'C14': 100,\n 'C15': 100,\n 'C16': 200000,\n 'C17': 50,\n 'C18': 10000,\n 'C19': 4000,\n 'C20': 20,\n 'C21': 250000,\n 'C22': 100,\n 'C23': 100,\n 'C24': 250000,\n 'C25': 400,\n 'C26': 100000\n}\n\nEMBEDDING_DIMENSIONS = {\n 'C1': 64,\n 'C2': 64,\n 'C3': 128,\n 'C4': 128,\n 'C5': 64,\n 'C6': 64,\n 'C7': 64,\n 'C8': 64,\n 'C9': 64,\n 'C10': 128,\n 'C11': 64,\n 'C12': 128,\n 'C13': 64,\n 'C14': 64,\n 'C15': 64,\n 'C16': 128,\n 'C17': 64,\n 'C18': 64,\n 'C19': 64,\n 'C20': 64,\n 'C21': 128,\n 'C22': 64,\n 'C23': 64,\n 'C24': 128,\n 'C25': 64,\n 'C26': 128\n}\n\n\nclass WDL():\n def __init__(self,\n wide_column=None,\n deep_column=None,\n dnn_hidden_units=[1024, 512, 256],\n optimizer_type='adam',\n linear_learning_rate=0.2,\n deep_learning_rate=0.01,\n inputs=None,\n bf16=False,\n stock_tf=None,\n adaptive_emb=False,\n input_layer_partitioner=None,\n dense_layer_partitioner=None):\n if not inputs:\n raise ValueError(\"Dataset is not defined.\")\n self._feature = inputs[0]\n self._label = inputs[1]\n\n self._wide_column = wide_column\n self._deep_column = deep_column\n if not wide_column or not deep_column:\n raise ValueError(\"Wide column or Deep column is not defined.\")\n\n self.tf = stock_tf\n self.bf16 = False if self.tf else bf16\n self.is_training = True\n self._adaptive_emb = adaptive_emb\n\n self._dnn_hidden_units = dnn_hidden_units\n self._linear_learning_rate = linear_learning_rate\n self._deep_learning_rate = deep_learning_rate\n self._optimizer_type = optimizer_type\n self._input_layer_partitioner = input_layer_partitioner\n self._dense_layer_partitioner = dense_layer_partitioner\n\n self._create_model()\n with tf.name_scope('head'):\n self._create_loss()\n self._create_optimizer()\n self._create_metrics()\n\n # used to add summary in tensorboard\n def _add_layer_summary(self, value, tag):\n tf.summary.scalar('%s/fraction_of_zero_values' % 
tag,\n tf.nn.zero_fraction(value))\n tf.summary.histogram('%s/activation' % tag, value)\n\n def _dnn(self, dnn_input, dnn_hidden_units=None, layer_name=''):\n for layer_id, num_hidden_units in enumerate(dnn_hidden_units):\n with tf.variable_scope(layer_name + '_%d' % layer_id,\n partitioner=self._dense_layer_partitioner,\n reuse=tf.AUTO_REUSE) as dnn_layer_scope:\n dnn_input = tf.layers.dense(\n dnn_input,\n units=num_hidden_units,\n activation=tf.nn.relu,\n kernel_initializer=tf.glorot_uniform_initializer(),\n name=dnn_layer_scope)\n\n self._add_layer_summary(dnn_input, dnn_layer_scope.name)\n return dnn_input\n\n # create model\n def _create_model(self):\n # Dnn part\n with tf.variable_scope('dnn'):\n # input layer\n with tf.variable_scope('input_from_feature_columns',\n partitioner=self._input_layer_partitioner,\n reuse=tf.AUTO_REUSE):\n if self._adaptive_emb and not self.tf:\n '''Adaptive Embedding Feature Part 1 of 2'''\n adaptive_mask_tensors = {}\n for col in CATEGORICAL_COLUMNS:\n adaptive_mask_tensors[col] = tf.ones([args.batch_size],\n tf.int32)\n net = tf.feature_column.input_layer(\n features=self._feature,\n feature_columns=self._deep_column,\n adaptive_mask_tensors=adaptive_mask_tensors)\n else:\n net = tf.feature_column.input_layer(\n features=self._feature,\n feature_columns=self._deep_column)\n self._add_layer_summary(net, 'input_from_feature_columns')\n\n # hidden layers\n dnn_scope = tf.variable_scope('dnn_layers', \\\n partitioner=self._dense_layer_partitioner, reuse=tf.AUTO_REUSE)\n with dnn_scope.keep_weights(dtype=tf.float32) if self.bf16 \\\n else dnn_scope:\n if self.bf16:\n net = tf.cast(net, dtype=tf.bfloat16)\n\n net = self._dnn(net, self._dnn_hidden_units, 'hiddenlayer')\n\n if self.bf16:\n net = tf.cast(net, dtype=tf.float32)\n\n # dnn logits\n logits_scope = tf.variable_scope('logits')\n with logits_scope.keep_weights(dtype=tf.float32) if self.bf16 \\\n else logits_scope as dnn_logits_scope:\n dnn_logits = tf.layers.dense(net,\n units=1,\n activation=None,\n name=dnn_logits_scope)\n self._add_layer_summary(dnn_logits, dnn_logits_scope.name)\n\n # linear part\n with tf.variable_scope(\n 'linear', partitioner=self._dense_layer_partitioner) as scope:\n linear_logits = tf.feature_column.linear_model(\n units=1,\n features=self._feature,\n feature_columns=self._wide_column,\n sparse_combiner='sum',\n weight_collections=None,\n trainable=True)\n\n self._add_layer_summary(linear_logits, scope.name)\n\n self._logits = tf.add_n([dnn_logits, linear_logits])\n self.probability = tf.math.sigmoid(self._logits)\n self.output = tf.round(self.probability)\n\n # compute loss\n def _create_loss(self):\n self._logits = tf.squeeze(self._logits)\n self.loss = tf.losses.sigmoid_cross_entropy(\n self._label,\n self._logits,\n scope='loss',\n reduction=tf.losses.Reduction.SUM_OVER_BATCH_SIZE)\n tf.summary.scalar('loss', self.loss)\n\n # define optimizer and generate train_op\n def _create_optimizer(self):\n self.global_step = tf.train.get_or_create_global_step()\n if self.tf or self._optimizer_type == 'adam':\n dnn_optimizer = tf.train.AdamOptimizer(\n learning_rate=self._deep_learning_rate,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8)\n elif self._optimizer_type == 'adagrad':\n dnn_optimizer = tf.train.AdagradOptimizer(\n learning_rate=self._deep_learning_rate,\n initial_accumulator_value=0.1,\n use_locking=False)\n elif self._optimizer_type == 'adamasync':\n dnn_optimizer = tf.train.AdamAsyncOptimizer(\n learning_rate=self._deep_learning_rate,\n beta1=0.9,\n beta2=0.999,\n 
epsilon=1e-8)\n elif self._optimizer_type == 'adagraddecay':\n dnn_optimizer = tf.train.AdagradDecayOptimizer(\n learning_rate=self._deep_learning_rate,\n global_step=self.global_step)\n else:\n raise ValueError(\"Optimizer type error.\")\n\n linear_optimizer = tf.train.FtrlOptimizer(\n learning_rate=self._linear_learning_rate,\n l1_regularization_strength=0.0,\n l2_regularization_strength=0.0)\n train_ops = []\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_ops.append(\n dnn_optimizer.minimize(self.loss,\n var_list=tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='dnn'),\n global_step=self.global_step))\n train_ops.append(\n linear_optimizer.minimize(self.loss,\n var_list=tf.get_collection(\n tf.GraphKeys.TRAINABLE_VARIABLES,\n scope='linear')))\n self.train_op = tf.group(*train_ops)\n\n # compute acc & auc\n def _create_metrics(self):\n self.acc, self.acc_op = tf.metrics.accuracy(labels=self._label,\n predictions=self.output)\n self.auc, self.auc_op = tf.metrics.auc(labels=self._label,\n predictions=self.probability,\n num_thresholds=1000)\n tf.summary.scalar('eval_acc', self.acc)\n tf.summary.scalar('eval_auc', self.auc)\n\n\n# generate dataset pipeline\ndef build_model_input(filename, batch_size, num_epochs):\n def parse_csv(value):\n tf.logging.info('Parsing {}'.format(filename))\n cont_defaults = [[0.0] for i in range(1, 14)]\n cate_defaults = [[' '] for i in range(1, 27)]\n label_defaults = [[0]]\n column_headers = TRAIN_DATA_COLUMNS\n record_defaults = label_defaults + cont_defaults + cate_defaults\n columns = tf.io.decode_csv(value, record_defaults=record_defaults)\n all_columns = collections.OrderedDict(zip(column_headers, columns))\n labels = all_columns.pop(LABEL_COLUMN[0])\n features = all_columns\n return features, labels\n\n '''Work Queue Feature'''\n if args.workqueue and not args.tf:\n from tensorflow.python.ops.work_queue import WorkQueue\n work_queue = WorkQueue([filename])\n # For multiple files:\n # work_queue = WorkQueue([filename, filename1,filename2,filename3])\n files = work_queue.input_dataset()\n else:\n files = filename\n # Extract lines from input files using the Dataset API.\n dataset = tf.data.TextLineDataset(files)\n dataset = dataset.shuffle(buffer_size=20000,\n seed=args.seed) # fix seed for reproducibility\n dataset = dataset.repeat(num_epochs)\n dataset = dataset.batch(batch_size)\n dataset = dataset.map(parse_csv, num_parallel_calls=28)\n dataset = dataset.prefetch(2)\n return dataset\n\n\n# generate feature columns\ndef build_feature_columns():\n # Notes: Statistics of Kaggle's Criteo Dataset have been calculated in advance to save time.\n mins_list = [\n 0.0, -3.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0\n ]\n range_list = [\n 1539.0, 22069.0, 65535.0, 561.0, 2655388.0, 233523.0, 26297.0, 5106.0,\n 24376.0, 9.0, 181.0, 1807.0, 6879.0\n ]\n\n def make_minmaxscaler(min, range):\n def minmaxscaler(col):\n return (col - min) / range\n\n return minmaxscaler\n\n deep_columns = []\n wide_columns = []\n for column_name in FEATURE_COLUMNS:\n if column_name in CATEGORICAL_COLUMNS:\n categorical_column = tf.feature_column.categorical_column_with_hash_bucket(\n column_name, hash_bucket_size=10000, dtype=tf.string)\n wide_columns.append(categorical_column)\n\n if not args.tf:\n '''Feature Elimination of EmbeddingVariable Feature'''\n if args.ev_elimination == 'gstep':\n # Feature elimination based on global steps\n evict_opt = tf.GlobalStepEvict(steps_to_live=4000)\n elif 
args.ev_elimination == 'l2':\n # Feature elimination based on l2 weight\n evict_opt = tf.L2WeightEvict(l2_weight_threshold=1.0)\n else:\n evict_opt = None\n '''Feature Filter of EmbeddingVariable Feature'''\n if args.ev_filter == 'cbf':\n # CBF-based feature filter\n filter_option = tf.CBFFilter(\n filter_freq=3,\n max_element_size=2**30,\n false_positive_probability=0.01,\n counter_type=tf.int64)\n elif args.ev_filter == 'counter':\n # Counter-based feature filter\n filter_option = tf.CounterFilter(filter_freq=3)\n else:\n filter_option = None\n ev_opt = tf.EmbeddingVariableOption(\n evict_option=evict_opt, filter_option=filter_option)\n\n if args.ev:\n '''Embedding Variable Feature'''\n categorical_column = tf.feature_column.categorical_column_with_embedding(\n column_name, dtype=tf.string, ev_option=ev_opt)\n elif args.adaptive_emb:\n ''' Adaptive Embedding Feature Part 2 of 2\n In addition to the following code, a dict, 'adaptive_mask_tensors', is needed as the input of\n 'tf.feature_column.input_layer(adaptive_mask_tensors=adaptive_mask_tensors)'.\n For column 'COL_NAME', the value of adaptive_mask_tensors['$COL_NAME'] is an int32\n tensor with shape [batch_size].\n '''\n categorical_column = tf.feature_column.categorical_column_with_adaptive_embedding(\n column_name,\n hash_bucket_size=HASH_BUCKET_SIZES[column_name],\n dtype=tf.string,\n ev_option=ev_opt)\n elif args.dynamic_ev:\n '''Dynamic-dimension Embedding Variable'''\n print(\n \"Dynamic-dimension Embedding Variable isn't really enabled in model.\"\n )\n sys.exit()\n\n if args.tf or not args.emb_fusion:\n embedding_column = tf.feature_column.embedding_column(\n categorical_column,\n dimension=EMBEDDING_DIMENSIONS[column_name],\n combiner='mean')\n else:\n '''Embedding Fusion Feature'''\n embedding_column = tf.feature_column.embedding_column(\n categorical_column,\n dimension=EMBEDDING_DIMENSIONS[column_name],\n combiner='mean',\n do_fusion=args.emb_fusion)\n\n deep_columns.append(embedding_column)\n else:\n normalizer_fn = None\n i = CONTINUOUS_COLUMNS.index(column_name)\n normalizer_fn = make_minmaxscaler(mins_list[i], range_list[i])\n column = tf.feature_column.numeric_column(\n column_name, normalizer_fn=normalizer_fn, shape=(1, ))\n wide_columns.append(column)\n deep_columns.append(column)\n\n return wide_columns, deep_columns\n\n\ndef train(sess_config,\n input_hooks,\n model,\n data_init_op,\n steps,\n checkpoint_dir,\n tf_config=None,\n server=None):\n model.is_training = True\n hooks = []\n hooks.extend(input_hooks)\n\n scaffold = tf.train.Scaffold(\n local_init_op=tf.group(tf.local_variables_initializer(), data_init_op),\n saver=tf.train.Saver(max_to_keep=args.keep_checkpoint_max))\n\n stop_hook = tf.train.StopAtStepHook(last_step=steps)\n log_hook = tf.train.LoggingTensorHook(\n {\n 'steps': model.global_step,\n 'loss': model.loss\n }, every_n_iter=100)\n hooks.append(stop_hook)\n hooks.append(log_hook)\n if args.timeline > 0:\n hooks.append(\n tf.train.ProfilerHook(save_steps=args.timeline,\n output_dir=checkpoint_dir))\n save_steps = args.save_steps if args.save_steps or args.no_eval else steps\n '''\n Incremental_Checkpoint\n Please add `save_incremental_checkpoint_secs` to 'tf.train.MonitoredTrainingSession'.\n It defaults to None; an incremental checkpoint save interval in seconds can be set\n to use the incremental checkpoint function, like `tf.train.MonitoredTrainingSession(\n save_incremental_checkpoint_secs=args.incremental_ckpt)`\n '''\n if args.incremental_ckpt and not args.tf:\n print(\"Incremental_Checkpoint is not really 
enabled.\")\n print(\"Please see the comments in the code.\")\n sys.exit()\n\n with tf.train.MonitoredTrainingSession(\n master=server.target if server else '',\n is_chief=tf_config['is_chief'] if tf_config else True,\n hooks=hooks,\n scaffold=scaffold,\n checkpoint_dir=checkpoint_dir,\n save_checkpoint_steps=save_steps,\n summary_dir=checkpoint_dir,\n save_summaries_steps=args.save_steps,\n config=sess_config) as sess:\n while not sess.should_stop():\n sess.run([model.loss, model.train_op])\n print(\"Training completed.\")\n\n\ndef eval(sess_config, input_hooks, model, data_init_op, steps, checkpoint_dir):\n model.is_training = False\n hooks = []\n hooks.extend(input_hooks)\n\n scaffold = tf.train.Scaffold(\n local_init_op=tf.group(tf.local_variables_initializer(), data_init_op))\n session_creator = tf.train.ChiefSessionCreator(\n scaffold=scaffold, checkpoint_dir=checkpoint_dir, config=sess_config)\n writer = tf.summary.FileWriter(os.path.join(checkpoint_dir, 'eval'))\n merged = tf.summary.merge_all()\n\n with tf.train.MonitoredSession(session_creator=session_creator,\n hooks=hooks) as sess:\n for _in in range(1, steps + 1):\n if (_in != steps):\n sess.run([model.acc_op, model.auc_op])\n if (_in % 1000 == 0):\n print(\"Evaluation complete:[{}/{}]\".format(_in, steps))\n else:\n eval_acc, eval_auc, events = sess.run(\n [model.acc_op, model.auc_op, merged])\n writer.add_summary(events, _in)\n print(\"Evaluation complete:[{}/{}]\".format(_in, steps))\n print(\"ACC = {}\\nAUC = {}\".format(eval_acc, eval_auc))\n\n\ndef main(tf_config=None, server=None):\n # check dataset and count dataset size\n print(\"Checking dataset...\")\n train_file = args.data_location + '/train.csv'\n test_file = args.data_location + '/eval.csv'\n if (not os.path.exists(train_file)) or (not os.path.exists(test_file)):\n print(\"Dataset does not exist in the given data_location.\")\n sys.exit()\n no_of_training_examples = sum(1 for line in open(train_file))\n no_of_test_examples = sum(1 for line in open(test_file))\n print(\"Number of training examples is {}\".format(no_of_training_examples))\n print(\"Number of test examples is {}\".format(no_of_test_examples))\n\n # set batch size, epoch & steps\n batch_size = math.ceil(\n args.batch_size / args.micro_batch\n ) if args.micro_batch and not args.tf else args.batch_size\n\n if args.steps == 0:\n no_of_epochs = 1\n train_steps = math.ceil(\n (float(no_of_epochs) * no_of_training_examples) / batch_size)\n else:\n no_of_epochs = math.ceil(\n (float(batch_size) * args.steps) / no_of_training_examples)\n train_steps = args.steps\n test_steps = math.ceil(float(no_of_test_examples) / batch_size)\n print(\"The number of training steps is {}\".format(train_steps))\n print(\"The number of testing steps is {}\".format(test_steps))\n\n # set fixed random seed\n tf.set_random_seed(args.seed)\n\n # set directory path for checkpoint_dir\n model_dir = os.path.join(args.output_dir,\n 'model_WIDE_AND_DEEP_' + str(int(time.time())))\n checkpoint_dir = args.checkpoint if args.checkpoint else model_dir\n print(\"Saving model checkpoints to \" + checkpoint_dir)\n\n # create data pipeline of train & test dataset\n train_dataset = build_model_input(train_file, batch_size, no_of_epochs)\n test_dataset = build_model_input(test_file, batch_size, 1)\n\n iterator = tf.data.Iterator.from_structure(train_dataset.output_types,\n test_dataset.output_shapes)\n next_element = iterator.get_next()\n\n train_init_op = iterator.make_initializer(train_dataset)\n test_init_op = iterator.make_initializer(test_dataset)\n\n # 
create feature column\n wide_column, deep_column = build_feature_columns()\n\n # create variable partitioner for distributed training\n num_ps_replicas = len(tf_config['ps_hosts']) if tf_config else 0\n input_layer_partitioner = partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas,\n min_slice_size=args.input_layer_partitioner <<\n 20) if args.input_layer_partitioner else None\n dense_layer_partitioner = partitioned_variables.min_max_variable_partitioner(\n max_partitions=num_ps_replicas,\n min_slice_size=args.dense_layer_partitioner <<\n 10) if args.dense_layer_partitioner else None\n\n # Session config\n sess_config = tf.ConfigProto()\n sess_config.inter_op_parallelism_threads = args.inter\n sess_config.intra_op_parallelism_threads = args.intra\n\n # Session hooks\n hooks = []\n\n if args.smartstaged and not args.tf:\n '''Smart staged Feature'''\n next_element = tf.staged(next_element, num_threads=4, capacity=40)\n sess_config.graph_options.optimizer_options.do_smart_stage = True\n hooks.append(tf.make_prefetch_hook())\n if args.op_fusion and not args.tf:\n '''Auto Graph Fusion'''\n sess_config.graph_options.optimizer_options.do_op_fusion = True\n if args.micro_batch and not args.tf:\n '''Auto Micro Batch'''\n sess_config.graph_options.optimizer_options.micro_batch_num = args.micro_batch\n\n # create model\n model = WDL(wide_column=wide_column,\n deep_column=deep_column,\n linear_learning_rate=args.linear_learning_rate,\n deep_learning_rate=args.deep_learning_rate,\n optimizer_type=args.optimizer,\n bf16=args.bf16,\n stock_tf=args.tf,\n adaptive_emb=args.adaptive_emb,\n inputs=next_element,\n input_layer_partitioner=input_layer_partitioner,\n dense_layer_partitioner=dense_layer_partitioner)\n\n # Run model training and evaluation\n train(sess_config, hooks, model, train_init_op, train_steps,\n checkpoint_dir, tf_config, server)\n if not (args.no_eval or tf_config):\n eval(sess_config, hooks, model, test_init_op, test_steps,\n checkpoint_dir)\n\n\ndef boolean_string(string):\n low_string = string.lower()\n if low_string not in {'false', 'true'}:\n raise ValueError('Not a valid boolean string')\n return low_string == 'true'\n\n\n# Get arg parser\ndef get_arg_parser():\n parser = argparse.ArgumentParser()\n parser.add_argument('--data_location',\n help='Full path of train data',\n required=False,\n default='./data')\n parser.add_argument('--steps',\n help='set the number of steps on train dataset',\n type=int,\n default=0)\n parser.add_argument('--batch_size',\n help='Batch size to train. Default is 512',\n type=int,\n default=512)\n parser.add_argument('--output_dir',\n help='Full path to model output directory. \\\n Default to ./result. Overridden by --checkpoint. ',\n required=False,\n default='./result')\n parser.add_argument('--checkpoint',\n help='Full path to checkpoints input/output. 
\\\n Default to ./result/$MODEL_TIMESTAMP',\n required=False)\n parser.add_argument('--save_steps',\n help='set the number of steps on saving checkpoints',\n type=int,\n default=0)\n parser.add_argument('--seed',\n help='set the random seed for tensorflow',\n type=int,\n default=2021)\n parser.add_argument('--optimizer',\n type=str, \\\n choices=['adam', 'adamasync', 'adagraddecay', 'adagrad'],\n default='adamasync')\n parser.add_argument('--linear_learning_rate',\n help='Learning rate for linear model',\n type=float,\n default=0.2)\n parser.add_argument('--deep_learning_rate',\n help='Learning rate for deep model',\n type=float,\n default=0.01)\n parser.add_argument('--keep_checkpoint_max',\n help='Maximum number of recent checkpoints to keep',\n type=int,\n default=1)\n parser.add_argument('--timeline',\n help='number of steps on saving timeline. Default 0',\n type=int,\n default=0)\n parser.add_argument('--protocol',\n type=str,\n choices=['grpc', 'grpc++', 'star_server'],\n default='grpc')\n parser.add_argument('--inter',\n help='set inter op parallelism threads.',\n type=int,\n default=0)\n parser.add_argument('--intra',\n help='set intra op parallelism threads.',\n type=int,\n default=0)\n parser.add_argument('--input_layer_partitioner', \\\n help='slice size of input layer partitioner, units MB. Default 8MB',\n type=int,\n default=8)\n parser.add_argument('--dense_layer_partitioner', \\\n help='slice size of dense layer partitioner, units KB. Default 16KB',\n type=int,\n default=16)\n parser.add_argument('--bf16',\n help='enable DeepRec BF16 in deep model. Default FP32',\n action='store_true')\n parser.add_argument('--no_eval',\n help='not evaluate trained model by eval dataset.',\n action='store_true')\n parser.add_argument('--tf', \\\n help='Use TF 1.15.5 API and disable DeepRec feature to run a baseline.',\n action='store_true')\n parser.add_argument('--smartstaged', \\\n help='Whether to enable smart staged feature of DeepRec, Default to True.',\n type=boolean_string,\n default=True)\n parser.add_argument('--emb_fusion', \\\n help='Whether to enable embedding fusion, Default to True.',\n type=boolean_string,\n default=True)\n parser.add_argument('--ev', \\\n help='Whether to enable DeepRec EmbeddingVariable. Default False.',\n type=boolean_string,\n default=False)\n parser.add_argument('--ev_elimination', \\\n help='Feature Elimination of EmbeddingVariable Feature. Default closed.',\n type=str,\n choices=[None, 'l2', 'gstep'],\n default=None)\n parser.add_argument('--ev_filter', \\\n help='Feature Filter of EmbeddingVariable Feature. Default closed.',\n type=str,\n choices=[None, 'counter', 'cbf'],\n default=None)\n parser.add_argument('--op_fusion', \\\n help='Whether to enable Auto graph fusion feature. Default to True',\n type=boolean_string,\n default=True)\n parser.add_argument('--micro_batch',\n help='Set num for Auto Micro Batch. Default 0 to close.',\n type=int,\n default=0) #TODO: Default to True\n parser.add_argument('--adaptive_emb', \\\n help='Whether to enable Adaptive Embedding. Default to False.',\n type=boolean_string,\n default=False)\n parser.add_argument('--dynamic_ev', \\\n help='Whether to enable Dynamic-dimension Embedding Variable. Default to False.',\n type=boolean_string,\n default=False)#TODO:enable\n parser.add_argument('--incremental_ckpt', \\\n help='Set time of save Incremental Checkpoint. Default 0 to close.',\n type=int,\n default=0)\n parser.add_argument('--workqueue', \\\n help='Whether to enable Work Queue. 
Default to False.',\n type=boolean_string,\n default=False)\n return parser\n\n\n# Parse distributed training configuration and generate cluster information\ndef generate_cluster_info(TF_CONFIG):\n print(TF_CONFIG)\n tf_config = json.loads(TF_CONFIG)\n cluster_config = tf_config.get('cluster')\n ps_hosts = []\n worker_hosts = []\n chief_hosts = []\n for key, value in cluster_config.items():\n if 'ps' == key:\n ps_hosts = value\n elif 'worker' == key:\n worker_hosts = value\n elif 'chief' == key:\n chief_hosts = value\n if chief_hosts:\n worker_hosts = chief_hosts + worker_hosts\n\n if not ps_hosts or not worker_hosts:\n print('TF_CONFIG ERROR')\n sys.exit()\n task_config = tf_config.get('task')\n task_type = task_config.get('type')\n task_index = task_config.get('index') + (1 if task_type == 'worker'\n and chief_hosts else 0)\n\n if task_type == 'chief':\n task_type = 'worker'\n\n is_chief = True if task_index == 0 else False\n cluster = tf.train.ClusterSpec({'ps': ps_hosts, 'worker': worker_hosts})\n server = tf.distribute.Server(cluster,\n job_name=task_type,\n task_index=task_index,\n protocol=args.protocol)\n if task_type == 'ps':\n server.join()\n elif task_type == 'worker':\n tf_config = {\n 'ps_hosts': ps_hosts,\n 'worker_hosts': worker_hosts,\n 'type': task_type,\n 'index': task_index,\n 'is_chief': is_chief\n }\n tf_device = tf.device(\n tf.train.replica_device_setter(\n worker_device='/job:worker/task:%d' % task_index,\n cluster=cluster))\n return tf_config, server, tf_device\n else:\n print(\"Task type or index error.\")\n sys.exit()\n\n\n# Some DeepRec features are enabled by ENV variables.\n# This function sets those ENV variables to enable the features.\n# A triple-quoted comment introduces these features for emphasis.\ndef set_env_for_DeepRec():\n '''\n Set some ENV variables for the DeepRec features that are enabled by ENV.\n More detailed information is shown in https://deeprec.readthedocs.io/zh/latest/index.html.\n START_STATISTIC_STEP & STOP_STATISTIC_STEP: On the CPU platform, DeepRec supports memory optimization\n in both stand-alone and distributed training. It is enabled by default, and the\n default start and stop steps of collection are 1000 and 1100. Reduce the initial\n cold start time by the following settings.\n MALLOC_CONF: On the CPU platform, DeepRec can use memory optimization with the jemalloc library.\n Please preload libjemalloc.so by `LD_PRELOAD=./libjemalloc.so.2 python ...`\n '''\n os.environ['START_STATISTIC_STEP'] = '100'\n os.environ['STOP_STATISTIC_STEP'] = '110'\n os.environ['MALLOC_CONF'] = \\\n 'background_thread:true,metadata_thp:auto,dirty_decay_ms:20000,muzzy_decay_ms:20000'\n\n\nif __name__ == '__main__':\n parser = get_arg_parser()\n args = parser.parse_args()\n\n if not args.tf:\n set_env_for_DeepRec()\n\n TF_CONFIG = os.getenv('TF_CONFIG')\n if not TF_CONFIG:\n main()\n else:\n tf_config, server, tf_device = generate_cluster_info(TF_CONFIG)\n main(tf_config, server)\n",
"\"\"\"\n Copyright (c) 2021, NVIDIA CORPORATION.\n \n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nfrom tensorflow.python.framework import ops\nimport tensorflow as tf\n\nclass HorovodStrategy(object):\n def __init__(self):\n import horovod.tensorflow as hvd\n self._hvd = hvd\n def scope(self):\n return ops.NullContextmanager()\n\n def run(self, fn, *args, **kwargs):\n return fn(*args, **kwargs)\n\n def reduce(self, combiner, tensors):\n if isinstance(tensors, list) or isinstance(tensors, tuple):\n return [self._hvd.allreduce(tensor, op=self._hvd.Average, compression=self._hvd.compression.NoneCompressor) for tensor in tensors]\n else:\n return self._hvd.allreduce(tensors)\n\n def gather(self, tensors):\n if isinstance(tensors, list) or isinstance(tensors, tuple):\n return [self._hvd.allgather(tensor) for tensor in tensors]\n else:\n return self._hvd.allgather(tensors)\n\n def broadcast_variables(self, variables):\n return self._hvd.broadcast_variables(variables, root_rank=0)\n\nclass OneDeviceStrategy(object):\n def scope(self):\n return ops.NullContextmanager()\n \n def run(self, fn, *args, **kwargs):\n return fn(*args, **kwargs)\n\n def reduce(self, combiner, tensors):\n return tensors\n\n def gather(self, tensors):\n return tensors\n\n def broadcast_variables(self, variables):\n return variables\n\n\n\n"
] | [
[
"tensorflow.nn.compute_average_loss",
"tensorflow.Graph",
"tensorflow.local_variables_initializer",
"numpy.allclose",
"tensorflow.control_dependencies",
"tensorflow.constant",
"tensorflow.concat",
"tensorflow.gradients",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.identity",
"numpy.concatenate",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorflow.group"
],
[
"tensorflow.python.ops.gen_tensor_buffer_ops.tensor_buffer_close",
"tensorflow.python.framework.ops.NotDifferentiable",
"tensorflow.python.ops.array_ops.identity",
"tensorflow.python.ops.gen_tensor_buffer_ops.tensor_buffer_cancel",
"tensorflow.python.ops.gen_tensor_buffer_ops.tensor_buffer_take",
"tensorflow.python.util.tf_export.tf_export",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.prefetch_runner.PrefetchRunner",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.framework.sparse_tensor.SparseTensor",
"tensorflow.python.framework.ops.device",
"tensorflow.python.ops.gen_tensor_buffer_ops.tensor_buffer_put",
"tensorflow.python.ops.control_flow_ops.no_op",
"tensorflow.python.util.nest.pack_sequence_as",
"tensorflow.python.util.nest.flatten",
"tensorflow.python.ops.prefetch_runner.PrefetchRunner.Hook"
],
[
"tensorflow.train.LoggingTensorHook",
"tensorflow.metrics.accuracy",
"tensorflow.train.AdagradDecayOptimizer",
"tensorflow.control_dependencies",
"tensorflow.cast",
"tensorflow.staged",
"tensorflow.train.ChiefSessionCreator",
"tensorflow.train.AdamAsyncOptimizer",
"tensorflow.train.AdamOptimizer",
"tensorflow.CounterFilter",
"tensorflow.data.TextLineDataset",
"tensorflow.train.MonitoredTrainingSession",
"tensorflow.summary.scalar",
"tensorflow.add_n",
"tensorflow.group",
"tensorflow.feature_column.categorical_column_with_adaptive_embedding",
"tensorflow.feature_column.embedding_column",
"tensorflow.feature_column.input_layer",
"tensorflow.get_collection",
"tensorflow.squeeze",
"tensorflow.distribute.Server",
"tensorflow.train.get_or_create_global_step",
"tensorflow.ConfigProto",
"tensorflow.layers.dense",
"tensorflow.logging.set_verbosity",
"tensorflow.name_scope",
"tensorflow.train.ProfilerHook",
"tensorflow.train.Saver",
"tensorflow.train.FtrlOptimizer",
"tensorflow.io.decode_csv",
"tensorflow.train.AdagradOptimizer",
"tensorflow.train.StopAtStepHook",
"tensorflow.CBFFilter",
"tensorflow.train.MonitoredSession",
"tensorflow.feature_column.categorical_column_with_embedding",
"tensorflow.feature_column.categorical_column_with_hash_bucket",
"tensorflow.math.sigmoid",
"tensorflow.summary.merge_all",
"tensorflow.feature_column.numeric_column",
"tensorflow.make_prefetch_hook",
"tensorflow.set_random_seed",
"tensorflow.GlobalStepEvict",
"tensorflow.round",
"tensorflow.summary.histogram",
"tensorflow.EmbeddingVariableOption",
"tensorflow.L2WeightEvict",
"tensorflow.local_variables_initializer",
"tensorflow.feature_column.linear_model",
"tensorflow.python.ops.work_queue.WorkQueue",
"tensorflow.losses.sigmoid_cross_entropy",
"tensorflow.train.ClusterSpec",
"tensorflow.ones",
"tensorflow.nn.zero_fraction",
"tensorflow.train.replica_device_setter",
"tensorflow.python.ops.partitioned_variables.min_max_variable_partitioner",
"tensorflow.data.Iterator.from_structure",
"tensorflow.glorot_uniform_initializer",
"tensorflow.variable_scope",
"tensorflow.metrics.auc"
],
[
"tensorflow.python.framework.ops.NullContextmanager"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
richardrl/graphics | [
"c05ee5b947bc462881968b4a109a9ba59ff8c6a8"
] | [
"tensorflow_graphics/rendering/opengl/math.py"
] | [
"# Copyright 2020 The TensorFlow Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"This module implements math routines used by OpenGL.\"\"\"\n\nimport tensorflow as tf\n\nfrom tensorflow_graphics.geometry.transformation import look_at\nfrom tensorflow_graphics.math.interpolation import weighted\nfrom tensorflow_graphics.rendering.camera import perspective\nfrom tensorflow_graphics.util import asserts\nfrom tensorflow_graphics.util import export_api\nfrom tensorflow_graphics.util import shape\n\n\ndef model_to_eye(point_model_space,\n camera_position,\n look_at_point,\n up_vector,\n name=\"model_to_eye\"):\n \"\"\"Transforms points from model to eye coordinates.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible.\n\n Args:\n point_model_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D points in model space.\n camera_position: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D position of the camera.\n look_at_point: A tensor of shape `[A1, ..., An, 3]`, with the last dimension\n storing the position where the camera is looking at.\n up_vector: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n defines the up vector of the camera.\n name: A name for this op. 
Defaults to \"model_to_eye\".\n\n Raises:\n ValueError: if the all the inputs are not of the same shape, or if any input\n of of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing `point_model_space` in eye\n coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_model_space = tf.convert_to_tensor(value=point_model_space)\n camera_position = tf.convert_to_tensor(value=camera_position)\n look_at_point = tf.convert_to_tensor(value=look_at_point)\n up_vector = tf.convert_to_tensor(value=up_vector)\n\n shape.check_static(\n tensor=point_model_space,\n tensor_name=\"point_model_space\",\n has_dim_equals=(-1, 3))\n shape.compare_batch_dimensions(\n tensors=(point_model_space, camera_position),\n last_axes=-2,\n tensor_names=(\"point_model_space\", \"camera_position\"),\n broadcast_compatible=True)\n\n model_to_eye_matrix = look_at.right_handed(camera_position, look_at_point,\n up_vector)\n batch_shape = tf.shape(input=point_model_space)[:-1]\n one = tf.ones(\n shape=tf.concat((batch_shape, (1,)), axis=-1),\n dtype=point_model_space.dtype)\n point_model_space = tf.concat((point_model_space, one), axis=-1)\n point_model_space = tf.expand_dims(point_model_space, axis=-1)\n res = tf.squeeze(tf.matmul(model_to_eye_matrix, point_model_space), axis=-1)\n return res[..., :-1]\n\n\ndef eye_to_clip(point_eye_space,\n vertical_field_of_view,\n aspect_ratio,\n near,\n far,\n name=\"eye_to_clip\"):\n \"\"\"Transforms points from eye to clip space.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible.\n\n Args:\n point_eye_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D points in eye coordinates.\n vertical_field_of_view: A tensor of shape `[A1, ..., An, 1]`, where the last\n dimension represents the vertical field of view of the frustum. Note that\n values for `vertical_field_of_view` must be in the range ]0,pi[.\n aspect_ratio: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n stores the width over height ratio of the frustum. Note that values for\n `aspect_ratio` must be non-negative.\n near: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n captures the distance between the viewer and the near clipping plane. Note\n that values for `near` must be non-negative.\n far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension captures\n the distance between the viewer and the far clipping plane. Note that\n values for `far` must be non-negative.\n name: A name for this op. 
Defaults to \"eye_to_clip\".\n\n Raises:\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 4]`, containing `point_eye_space` in\n homogeneous clip coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_eye_space = tf.convert_to_tensor(value=point_eye_space)\n vertical_field_of_view = tf.convert_to_tensor(value=vertical_field_of_view)\n aspect_ratio = tf.convert_to_tensor(value=aspect_ratio)\n near = tf.convert_to_tensor(value=near)\n far = tf.convert_to_tensor(value=far)\n\n shape.check_static(\n tensor=point_eye_space,\n tensor_name=\"point_eye_space\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=vertical_field_of_view,\n tensor_name=\"vertical_field_of_view\",\n has_dim_equals=(-1, 1))\n shape.check_static(\n tensor=aspect_ratio, tensor_name=\"aspect_ratio\", has_dim_equals=(-1, 1))\n shape.check_static(tensor=near, tensor_name=\"near\", has_dim_equals=(-1, 1))\n shape.check_static(tensor=far, tensor_name=\"far\", has_dim_equals=(-1, 1))\n shape.compare_batch_dimensions(\n tensors=(point_eye_space, vertical_field_of_view, aspect_ratio, near,\n far),\n last_axes=-2,\n tensor_names=(\"point_eye_space\", \"vertical_field_of_view\",\n \"aspect_ratio\", \"near\", \"far\"),\n broadcast_compatible=True)\n\n perspective_matrix = perspective.right_handed(vertical_field_of_view,\n aspect_ratio, near, far)\n batch_shape = tf.shape(input=point_eye_space)[:-1]\n one = tf.ones(\n shape=tf.concat((batch_shape, (1,)), axis=-1),\n dtype=point_eye_space.dtype)\n point_eye_space = tf.concat((point_eye_space, one), axis=-1)\n point_eye_space = tf.expand_dims(point_eye_space, axis=-1)\n\n return tf.squeeze(tf.matmul(perspective_matrix, point_eye_space), axis=-1)\n\n\ndef clip_to_ndc(point_clip_space, name=\"clip_to_ndc\"):\n \"\"\"Transforms points from clip to normalized device coordinates (ndc).\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n point_clip_space: A tensor of shape `[A1, ..., An, 4]`, where the last\n dimension represents points in clip space.\n name: A name for this op. 
Defaults to \"clip_to_ndc\".\n\n Raises:\n ValueError: If `point_clip_space` is not of size 4 in its last dimension.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing `point_clip_space` in\n normalized device coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_clip_space = tf.convert_to_tensor(value=point_clip_space)\n\n shape.check_static(\n tensor=point_clip_space,\n tensor_name=\"point_clip_space\",\n has_dim_equals=(-1, 4))\n\n w = point_clip_space[..., -1:]\n return point_clip_space[..., :3] / w\n\n\ndef ndc_to_screen(point_ndc_space,\n lower_left_corner,\n screen_dimensions,\n near,\n far,\n name=\"ndc_to_screen\"):\n \"\"\"Transforms points from normalized device coordinates to screen coordinates.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible between `point_ndc_space` and the other variables.\n\n Args:\n point_ndc_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents points in normalized device coordinates.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n near: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n captures the distance between the viewer and the near clipping plane. Note\n that values for `near` must be non-negative.\n far: A tensor of shape `[A1, ..., An, 1]`, where the last dimension\n captures the distance between the viewer and the far clipping plane. Note\n that values for `far` must be greater than those of `near`.\n name: A name for this op. 
Defaults to \"ndc_to_screen\".\n\n Raises:\n InvalidArgumentError: if any input contains data not in the specified range\n of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing `point_ndc_space` in\n screen coordinates.\n \"\"\"\n with tf.name_scope(name):\n point_ndc_space = tf.convert_to_tensor(value=point_ndc_space)\n lower_left_corner = tf.convert_to_tensor(value=lower_left_corner)\n screen_dimensions = tf.convert_to_tensor(value=screen_dimensions)\n near = tf.convert_to_tensor(value=near)\n far = tf.convert_to_tensor(value=far)\n\n shape.check_static(\n tensor=point_ndc_space,\n tensor_name=\"point_ndc_space\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=lower_left_corner,\n tensor_name=\"lower_left_corner\",\n has_dim_equals=(-1, 2))\n shape.check_static(\n tensor=screen_dimensions,\n tensor_name=\"screen_dimensions\",\n has_dim_equals=(-1, 2))\n shape.check_static(tensor=near, tensor_name=\"near\", has_dim_equals=(-1, 1))\n shape.check_static(tensor=far, tensor_name=\"far\", has_dim_equals=(-1, 1))\n\n shape.compare_batch_dimensions(\n tensors=(lower_left_corner, screen_dimensions, near, far),\n last_axes=-2,\n tensor_names=(\"lower_left_corner\", \"screen_dimensions\", \"near\", \"far\"),\n broadcast_compatible=False)\n shape.compare_batch_dimensions(\n tensors=(point_ndc_space, near),\n last_axes=-2,\n tensor_names=(\"point_ndc_space\", \"near\"),\n broadcast_compatible=True)\n\n screen_dimensions = asserts.assert_all_above(\n screen_dimensions, 0.0, open_bound=True)\n near = asserts.assert_all_above(near, 0.0, open_bound=True)\n far = asserts.assert_all_above(far, near, open_bound=True)\n\n ndc_to_screen_factor = tf.concat(\n (screen_dimensions, far - near), axis=-1) / 2.0\n screen_center = tf.concat(\n (lower_left_corner + screen_dimensions / 2.0, (near + far) / 2.0),\n axis=-1)\n return ndc_to_screen_factor * point_ndc_space + screen_center\n\n\ndef model_to_screen(point_model_space,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner=(0.0, 0.0),\n name=\"model_to_screen\"):\n \"\"\"Transforms points from model to screen coordinates.\n\n Note:\n Please refer to http://www.songho.ca/opengl/gl_transform.html for an\n in-depth review of this pipeline.\n\n Note:\n In the following, A1 to An are optional batch dimensions which must be\n broadcast compatible.\n\n Args:\n point_model_space: A tensor of shape `[A1, ..., An, 3]`, where the last\n dimension represents the 3D points in model space.\n model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from model to eye\n coordinates.\n perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from eye to clip\n coordinates.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n name: A name for this op. 
Defaults to \"model_to_screen\".\n\n Raises:\n InvalidArgumentError: if any input contains data not in the specified range\n of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tuple of two tensors, respectively of shape `[A1, ..., An, 3]` and\n `[A1, ..., An, 1]`, where the first tensor containing the projection of\n `point_model_space` in screen coordinates, and the second represents the 'w'\n component of `point_model_space` in clip space.\n \"\"\"\n with tf.name_scope(name):\n point_model_space = tf.convert_to_tensor(value=point_model_space)\n model_to_eye_matrix = tf.convert_to_tensor(value=model_to_eye_matrix)\n perspective_matrix = tf.convert_to_tensor(value=perspective_matrix)\n\n shape.check_static(\n tensor=point_model_space,\n tensor_name=\"point_model_space\",\n has_dim_equals=(-1, 3))\n shape.check_static(\n tensor=model_to_eye_matrix,\n tensor_name=\"model_to_eye_matrix\",\n has_dim_equals=((-1, 4), (-2, 4)))\n shape.check_static(\n tensor=perspective_matrix,\n tensor_name=\"perspective_matrix\",\n has_dim_equals=((-1, 4), (-2, 4)))\n shape.compare_batch_dimensions(\n tensors=(point_model_space, model_to_eye_matrix, perspective_matrix),\n last_axes=(-2, -3, -3),\n tensor_names=(\"point_model_space\", \"model_to_eye_matrix\",\n \"perspective_matrix\"),\n broadcast_compatible=True)\n\n batch_shape = tf.shape(input=point_model_space)[:-1]\n one = tf.ones(\n shape=tf.concat((batch_shape, (1,)), axis=-1),\n dtype=point_model_space.dtype)\n point_model_space = tf.concat((point_model_space, one), axis=-1)\n point_model_space = tf.expand_dims(point_model_space, axis=-1)\n\n view_projection_matrix = tf.linalg.matmul(perspective_matrix,\n model_to_eye_matrix)\n\n _, _, near, far = perspective.parameters_from_right_handed(\n perspective_matrix)\n\n point_clip_space = tf.squeeze(\n tf.matmul(view_projection_matrix, point_model_space), axis=-1)\n point_ndc_space = clip_to_ndc(point_clip_space)\n point_screen_space = ndc_to_screen(point_ndc_space, lower_left_corner,\n screen_dimensions, near, far)\n return point_screen_space, point_clip_space[..., 3:4]\n\n\ndef perspective_correct_barycentrics(triangle_vertices_model_space,\n pixel_position,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner=(0.0, 0.0),\n name=\"perspective_correct_barycentrics\"):\n \"\"\"Computes perspective correct barycentrics.\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n triangle_vertices_model_space: A tensor of shape `[A1, ..., An, 3, 3]`,\n where the last dimension represents the vertices of a triangle in model\n space.\n pixel_position: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension stores the position (in pixels) where the interpolation is\n requested.\n model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from model to eye\n coordinates.\n perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from eye to clip\n coordinates.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n name: A name for this op. 
Defaults to \"perspective_correct_barycentrics\".\n\n Raises:\n InvalidArgumentError: if any input contains data not in the specified range\n of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, 3]`, containing perspective correct\n barycentric coordinates.\n \"\"\"\n with tf.name_scope(name):\n pixel_position = tf.convert_to_tensor(value=pixel_position)\n triangle_vertices_model_space = tf.convert_to_tensor(\n value=triangle_vertices_model_space)\n shape.check_static(\n tensor=pixel_position,\n tensor_name=\"pixel_position\",\n has_dim_equals=(-1, 2))\n shape.check_static(\n tensor=triangle_vertices_model_space,\n tensor_name=\"triangle_vertices_model_space\",\n has_dim_equals=((-2, 3), (-1, 3)))\n\n lower_left_corner = tf.convert_to_tensor(value=lower_left_corner)\n screen_dimensions = tf.convert_to_tensor(value=screen_dimensions)\n lower_left_corner = shape.add_batch_dimensions(\n lower_left_corner,\n \"lower_left_corner\",\n model_to_eye_matrix.shape[:-2],\n last_axis=-2)\n screen_dimensions = shape.add_batch_dimensions(\n screen_dimensions,\n \"screen_dimensions\",\n model_to_eye_matrix.shape[:-2],\n last_axis=-2)\n\n vertices_screen, vertices_w = model_to_screen(triangle_vertices_model_space,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner)\n vertices_w = tf.squeeze(vertices_w, axis=-1)\n pixel_position = tf.expand_dims(pixel_position, axis=-2)\n barycentric_coordinates, _ = weighted.get_barycentric_coordinates(\n vertices_screen[..., :2], pixel_position)\n barycentric_coordinates = tf.squeeze(barycentric_coordinates, axis=-2)\n coeffs = barycentric_coordinates / vertices_w\n return tf.linalg.normalize(coeffs, ord=1, axis=-1)[0]\n\n\ndef interpolate_attributes(attribute,\n barycentric,\n name=\"interpolate_attributes\"):\n \"\"\"Interpolates attributes using barycentric weights.\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n attribute: A tensor of shape `[A1, ..., An, 3, B]`, where the last dimension\n stores a per-vertex `B`-dimensional attribute.\n barycentric: A tensor of shape `[A1, ..., An, 3]`, where the last dimension\n contains barycentric coordinates.\n name: A name for this op. 
Defaults to \"interpolate_attributes\".\n\n Returns:\n A tensor of shape `[A1, ..., An, B]`, containing interpolated attributes.\n \"\"\"\n with tf.name_scope(name):\n attribute = tf.convert_to_tensor(value=attribute)\n barycentric = tf.convert_to_tensor(value=barycentric)\n\n shape.check_static(\n tensor=attribute, tensor_name=\"attribute\", has_dim_equals=(-2, 3))\n shape.check_static(\n tensor=barycentric, tensor_name=\"barycentric\", has_dim_equals=(-1, 3))\n shape.compare_batch_dimensions(\n tensors=(attribute, barycentric),\n last_axes=(-2, -1),\n tensor_names=(\"attribute\", \"barycentric\"),\n broadcast_compatible=True)\n barycentric = asserts.assert_normalized(barycentric, order=1)\n return tf.reduce_sum(\n input_tensor=tf.expand_dims(barycentric, axis=-1) * attribute, axis=-2)\n\n\ndef perspective_correct_interpolation(triangle_vertices_model_space,\n attribute,\n pixel_position,\n model_to_eye_matrix,\n perspective_matrix,\n screen_dimensions,\n lower_left_corner=(0.0, 0.0),\n name=\"perspective_correct_interpolation\"):\n \"\"\"Returns perspective corrected interpolation of attributes over triangles.\n\n Note:\n In the following, A1 to An are optional batch dimensions.\n\n Args:\n triangle_vertices_model_space: A tensor of shape `[A1, ..., An, 3, 3]`,\n where the last dimension represents the vertices of a triangle in model\n space.\n attribute: A tensor of shape `[A1, ..., An, 3, B]`, where the last dimension\n stores a per-vertex `B`-dimensional attribute.\n pixel_position: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension stores the position (in pixels) where the interpolation is\n requested.\n model_to_eye_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from model to eye\n coordinates.\n perspective_matrix: A tensor of shape `[A1, ..., An, 4, 4]`, where the last\n two dimension represent matrices to transform points from eye to clip\n coordinates.\n screen_dimensions: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension is expressed in pixels and captures the width and the height (in\n pixels) of the screen.\n lower_left_corner: A tensor of shape `[A1, ..., An, 2]`, where the last\n dimension captures the position (in pixels) of the lower left corner of\n the screen.\n name: A name for this op. Defaults to \"perspective_correct_interpolation\".\n\n Raises:\n tf.errors.InvalidArgumentError: if any input contains data not in the\n specified range of valid values.\n ValueError: If any input is of an unsupported shape.\n\n Returns:\n A tensor of shape `[A1, ..., An, B]`, containing interpolated attributes.\n \"\"\"\n with tf.name_scope(name):\n barycentric = perspective_correct_barycentrics(\n triangle_vertices_model_space, pixel_position, model_to_eye_matrix,\n perspective_matrix, screen_dimensions, lower_left_corner)\n return interpolate_attributes(attribute, barycentric)\n\n\n# API contains all public functions and classes.\n__all__ = export_api.get_functions_and_classes()\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.matmul",
"tensorflow.concat",
"tensorflow.shape",
"tensorflow.expand_dims",
"tensorflow.linalg.matmul",
"tensorflow.squeeze",
"tensorflow.name_scope",
"tensorflow.linalg.normalize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
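A minimal NumPy sketch of the clip-to-NDC and NDC-to-screen stages implemented by the tensorflow_graphics record above, assuming unbatched inputs and omitting the shape/range checks; the sample point and screen parameters are illustrative, not taken from the source.

```python
import numpy as np

def clip_to_ndc(point_clip):
    # Perspective divide: x, y, z divided by the homogeneous w component.
    return point_clip[..., :3] / point_clip[..., 3:]

def ndc_to_screen(point_ndc, lower_left, dims, near, far):
    # Affine map from NDC in [-1, 1] to pixel coordinates plus a depth range.
    factor = np.concatenate([dims, far - near], axis=-1) / 2.0
    center = np.concatenate([lower_left + dims / 2.0, (near + far) / 2.0], axis=-1)
    return factor * point_ndc + center

point_clip = np.array([0.5, -0.25, 1.0, 2.0])          # homogeneous clip-space point
ndc = clip_to_ndc(point_clip)                          # -> [0.25, -0.125, 0.5]
screen = ndc_to_screen(ndc,
                       lower_left=np.array([0.0, 0.0]),
                       dims=np.array([640.0, 480.0]),  # screen width, height in pixels
                       near=np.array([0.1]),
                       far=np.array([100.0]))
print(ndc, screen)                                     # screen -> [400., 210., 75.025]
```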
Anthuang/bdd100k | [
"b7e1781317784317e4e715ab325515ade73978a9"
] | [
"bdd100k/vis/viewer.py"
] | [
"\"\"\"An offline label visualizer for BDD100K file.\n\nWorks for 2D / 3D bounding box, segmentation masks, etc.\n\"\"\"\n\nimport argparse\nimport concurrent.futures\nfrom typing import Dict\n\nimport numpy as np\nfrom scalabel.common.parallel import NPROC\nfrom scalabel.common.typing import NDArrayF64\nfrom scalabel.label.typing import Label\nfrom scalabel.vis.controller import (\n ControllerConfig,\n DisplayConfig,\n ViewController,\n)\nfrom scalabel.vis.label import LabelViewer, UIConfig\n\nfrom ..label.label import drivables, labels, lane_categories\n\n\nclass LabelViewerBDD100K(LabelViewer):\n \"\"\"Basic class for viewing BDD100K labels.\"\"\"\n\n def __init__(self, ui_cfg: UIConfig) -> None:\n \"\"\"Initializer.\"\"\"\n super().__init__(ui_cfg)\n self.colors: Dict[str, NDArrayF64] = {\n label.name: np.array(label.color)\n for label in labels\n if not label.hasInstances\n }\n self.colors.update(\n {drivable.name: np.array(drivable.color) for drivable in drivables}\n )\n self.colors.update(\n {lane.name: np.array(lane.color) for lane in lane_categories}\n )\n\n def _get_label_color(self, label: Label) -> NDArrayF64:\n \"\"\"Get color by category and id.\"\"\"\n if label.category in self.colors:\n return self.colors[label.category] / 255.0\n return super()._get_label_color(label)\n\n\ndef parse_args() -> argparse.Namespace:\n \"\"\"Use argparse to get command line arguments.\"\"\"\n parser = argparse.ArgumentParser(\n \"\"\"\nInterface keymap:\n - n / p: Show next or previous image\n - Space: Start / stop animation\n - t: Toggle 2D / 3D bounding box (if avaliable)\n - a: Toggle the display of the attribute tags on boxes or polygons.\n - c: Toggle the display of polygon vertices.\n - Up: Increase the size of polygon vertices.\n - Down: Decrease the size of polygon vertices.\nExport images:\n - add `-o {dir}` tag when runing.\n \"\"\"\n )\n parser.add_argument(\"-i\", \"--image-dir\", help=\"image directory\")\n parser.add_argument(\n \"-l\",\n \"--labels\",\n required=False,\n default=\"labels.json\",\n help=\"Path to the json file\",\n type=str,\n )\n parser.add_argument(\n \"--height\",\n type=int,\n default=720,\n help=\"Height of the image (px)\",\n )\n parser.add_argument(\n \"--width\",\n type=int,\n default=1280,\n help=\"Width of the image (px)\",\n )\n parser.add_argument(\n \"-s\",\n \"--scale\",\n type=float,\n default=1.0,\n help=\"Scale up factor for annotation factor. \"\n \"Useful when producing visualization as thumbnails.\",\n )\n parser.add_argument(\n \"--no-attr\",\n action=\"store_true\",\n default=False,\n help=\"Do not show attributes\",\n )\n parser.add_argument(\n \"--no-box3d\",\n action=\"store_true\",\n default=True,\n help=\"Do not show 3D bounding boxes\",\n )\n parser.add_argument(\n \"--no-tags\",\n action=\"store_true\",\n default=False,\n help=\"Do not show tags on boxes or polygons\",\n )\n parser.add_argument(\n \"--no-vertices\",\n action=\"store_true\",\n default=False,\n help=\"Do not show vertices\",\n )\n parser.add_argument(\n \"-o\",\n \"--output-dir\",\n required=False,\n default=None,\n type=str,\n help=\"output image directory with label visualization. \"\n \"If it is set, the images will be written to the \"\n \"output folder instead of being displayed \"\n \"interactively.\",\n )\n parser.add_argument(\n \"--range-begin\",\n type=int,\n default=0,\n help=\"from which frame to visualize. Default is 0.\",\n )\n parser.add_argument(\n \"--range-end\",\n type=int,\n default=-1,\n help=\"up to which frame to visualize. 
Default is -1, \"\n \"indicating loading all frames for visualizatoin.\",\n )\n parser.add_argument(\n \"--nproc\",\n type=int,\n default=NPROC,\n help=\"number of processes for json loading\",\n )\n\n args = parser.parse_args()\n\n return args\n\n\ndef main() -> None:\n \"\"\"Main function.\"\"\"\n args = parse_args()\n # Initialize the thread executor.\n with concurrent.futures.ThreadPoolExecutor(max_workers=16) as executor:\n ui_cfg = UIConfig(\n height=args.height,\n width=args.width,\n scale=args.scale,\n )\n display_cfg = DisplayConfig(\n with_attr=not args.no_attr,\n with_box2d=args.no_box3d,\n with_box3d=not args.no_box3d,\n with_ctrl_points=not args.no_vertices,\n with_tags=not args.no_tags,\n )\n viewer = LabelViewer(ui_cfg)\n\n ctrl_cfg = ControllerConfig(\n image_dir=args.image_dir,\n label_path=args.labels,\n out_dir=args.output_dir,\n nproc=args.nproc,\n range_begin=args.range_begin,\n range_end=args.range_end,\n )\n controller = ViewController(ctrl_cfg, display_cfg, executor)\n viewer.run_with_controller(controller)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
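A small sketch of the category-to-color lookup that the LabelViewerBDD100K subclass above relies on. The LABELS tuples here are hypothetical stand-ins for the imported bdd100k label definitions, and the gray fallback replaces the superclass call.

```python
import numpy as np

# Hypothetical stand-in for the labels / drivables / lane_categories imports.
LABELS = [("car", (0, 0, 142)), ("person", (220, 20, 60)), ("road", (128, 64, 128))]

colors = {name: np.array(rgb, dtype=np.float64) for name, rgb in LABELS}

def get_label_color(category):
    # Scale 8-bit RGB to [0, 1], as the viewer's _get_label_color does.
    fallback = np.array([128.0, 128.0, 128.0])  # gray stand-in for super() behavior
    return colors.get(category, fallback) / 255.0

print(get_label_color("car"))      # [0.    0.    0.557]
print(get_label_color("bicycle"))  # unknown category -> gray fallback
```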
PedroRisquez/c19norge-data | [
"ac38460800d5d311877b949c74444b44a0916575"
] | [
"src/utils/graphs.py"
] | [
"import os\nfrom datetime import date\nimport altair as alt\nimport pandas as pd\n\n\ndef tested_lab():\n data = \"data/tested_lab.csv\"\n filename = \"graphs/tested_lab.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n\n mapping = {\n \"new_neg\": \"New (Negative)\",\n \"new_pos\": \"New (Positive)\",\n \"new_total\": \"New\",\n \"pr100_pos\": \"Share Positive\",\n \"total\": \"Cumulative\",\n }\n\n df = df.rename(columns=mapping)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df[\"Share Negative\"] = 100 - df[\"Share Positive\"]\n df = df.melt(\n id_vars=[\"date\", \"Share Positive\"], var_name=\"category\", value_name=\"value\"\n )\n\n base = alt.Chart(\n df,\n title=\"Number of tested persons per specimen collection date and number of positive results (Source: FHI)\",\n ).encode(alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)))\n\n andel = base.mark_line(color=\"red\", opacity=0.8).encode(\n y=alt.Y(\"Share Positive:Q\", title=\"% Positive\", axis=alt.Axis(grid=True))\n )\n\n bar = (\n base.transform_filter(\n (alt.datum.category == \"New (Negative)\")\n | (alt.datum.category == \"New (Positive)\")\n )\n .mark_bar()\n .encode(\n y=alt.Y(\"value:Q\", title=\"Number of persons\"),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\"New (Positive)\", \"New (Negative)\", \"% Positive\"],\n range=[\"#FF9622\", \"#6DA9FF\", \"red\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n chart = (\n alt.layer(bar, andel)\n .resolve_scale(y=\"independent\")\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef confirmed():\n data = \"data/confirmed.csv\"\n filename = \"graphs/confirmed.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df.loc[df[\"source\"] == \"fhi:git\"]\n df[\"new_sma7\"] = df.new.rolling(window=7).mean().shift()\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"new\", \"new_sma7\", \"total\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\"new\": \"New cases\", \"new_sma7\": \"Avg 7 d.\", \"total\": \"Cumulative\"}\n\n df[\"category\"] = df[\"category\"].replace(rename)\n\n base = alt.Chart(\n df,\n title=\"Number of reported COVID-19 cases by specimen collection date (Source: FHI)\",\n ).encode(alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)))\n\n bar = (\n base.transform_filter(alt.datum.category == \"New cases\")\n .mark_bar(color=\"#FFD1D1\")\n .encode(y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"New per day\", grid=True)))\n )\n\n line = (\n base.transform_filter(alt.datum.category == \"Cumulative\")\n .mark_line(color=\"#2E507B\", strokeWidth=3)\n .encode(\n y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"Cumulative\")),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\"New cases\", \"Avg 7 d.\", \"Cumulative\"],\n range=[\"#FFD1D1\", \"red\", \"#2E507B\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n ma7 = (\n base.transform_filter(alt.datum.category == \"Avg 7 d.\")\n .mark_line(opacity=0.8)\n .encode(y=alt.Y(\"value:Q\"), color=alt.Color(\"category:N\"))\n )\n\n chart = (\n alt.layer(bar + ma7, line)\n .resolve_scale(y=\"independent\")\n 
.properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef dead():\n data = \"data/dead.csv\"\n filename = \"graphs/dead.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n\n today = date.today()\n idx = pd.date_range(\"2020-03-07\", df[\"date\"].max())\n df.index = pd.DatetimeIndex(df[\"date\"])\n df = df.reindex(idx)\n df[\"date\"] = df.index\n df = df.reset_index(drop=True)\n df = df[df.date <= str(today)]\n\n df[\"new\"] = df[\"new\"].fillna(0).astype(int)\n df[\"total\"] = df[\"total\"].fillna(method=\"bfill\").astype(int)\n df[\"new_sma7\"] = df.new.rolling(window=7).mean()\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"new\", \"new_sma7\", \"total\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\"new\": \"New\", \"new_sma7\": \"Avg 7 d.\", \"total\": \"Cumulative\"}\n df[\"category\"] = df[\"category\"].replace(rename)\n\n base = alt.Chart(df, title=\"COVID-19 related deaths (Source: FHI)\").encode(\n alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40))\n )\n\n bar = (\n base.transform_filter(alt.datum.category == \"New\")\n .mark_bar(color=\"#FFD1D1\")\n .encode(y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"New per day\", grid=True)))\n )\n\n line = (\n base.transform_filter(alt.datum.category == \"Cumulative\")\n .mark_line(color=\"#2E507B\", strokeWidth=3)\n .encode(\n y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"Cumulative\")),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\"New\", \"Avg 7 d.\", \"Cumulative\"],\n range=[\"#FFD1D1\", \"red\", \"#2E507B\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n ma7 = (\n base.transform_filter(alt.datum.category == \"Avg 7 d.\")\n .mark_line(opacity=0.8)\n .encode(y=alt.Y(\"value:Q\"), color=alt.Color(\"category:N\"))\n )\n\n chart = (\n alt.layer(bar + ma7, line)\n .resolve_scale(y=\"independent\")\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef hospitalized():\n data = \"data/hospitalized.csv\"\n filename = \"graphs/hospitalized.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n\n today = date.today()\n idx = pd.date_range(\"2020-03-08\", today)\n df.index = pd.DatetimeIndex(df[\"date\"])\n df = df.reindex(idx)\n df[\"date\"] = df.index\n df = df.reset_index(drop=True)\n\n df[\"admissions\"] = df[\"admissions\"].fillna(method=\"ffill\").astype(int)\n df[\"respiratory\"] = df[\"respiratory\"].fillna(method=\"ffill\").astype(int)\n\n df_melt = pd.melt(\n df,\n id_vars=[\"date\"],\n value_vars=[\"admissions\", \"respiratory\"],\n value_name=\"value\",\n ).replace({\"admissions\": \"Hospitalized\", \"respiratory\": \"Respirator\"})\n\n chart = (\n alt.Chart(\n df_melt,\n title=\"Number of patients admitted to hospital with COVID-19 (Source: Helsedirektoratet)\",\n )\n .mark_area(line={}, opacity=0.3)\n .encode(\n x=alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)),\n y=alt.Y(\n \"value:Q\",\n stack=None,\n title=\"Number of patients\",\n 
),\n color=alt.Color(\n \"variable:N\",\n scale=alt.Scale(\n domain=[\"Hospitalized\", \"Respirator\"], range=[\"#5A9DFF\", \"#FF8B1B\"]\n ),\n legend=alt.Legend(title=None),\n ),\n )\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=655,\n )\n )\n\n chart.save(filename)\n\n\ndef smittestopp():\n data = \"data/smittestopp.csv\"\n filename = \"graphs/smittestopp.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"new_reported\", \"total_downloads\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\n \"new_reported\": \"Number of reported infections\",\n \"total_downloads\": \"Number of downloads\",\n }\n\n df[\"category\"] = df[\"category\"].replace(rename)\n\n base = alt.Chart(\n df,\n title=\"Number of downloads of Smittestopp og number of reported infections through the app (Source: FHI)\",\n ).encode(alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)))\n\n downloads = (\n base.transform_filter(alt.datum.category == \"Number of downloads\")\n .mark_area(line={}, color=\"#5BC1FF\", opacity=0.2)\n .encode(\n y=alt.Y(\n \"value:Q\",\n axis=alt.Axis(title=\"Number of downloads\", grid=True),\n )\n )\n )\n\n reported = (\n base.transform_filter(alt.datum.category == \"Number of reported infections\")\n .mark_bar(color=\"#FFA57E\")\n .encode(\n y=alt.Y(\"value:Q\", axis=alt.Axis(title=\"Number of reported infections\")),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\n \"Number of downloads\",\n \"Number of reported infections\",\n ],\n range=[\"#5BC1FF\", \"#FFA57E\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n )\n\n chart = (\n alt.layer(reported, downloads)\n .resolve_scale(y=\"independent\")\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n labelLimit=200,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=390,\n legendY=660,\n )\n )\n\n chart.save(filename)\n\n\ndef vaccine_doses():\n data = \"data/vaccine_doses.csv\"\n filename = \"graphs/vaccine_doses.png\"\n if os.path.exists(filename):\n os.remove(filename)\n\n df = pd.read_csv(data)\n df[\"date\"] = pd.to_datetime(df[\"date\"])\n df = df[df[\"granularity_geo\"] == \"nation\"]\n df[\"new_sma7\"] = df.new_doses.rolling(window=7).mean().shift()\n\n df = df.melt(\n id_vars=[\"date\"],\n value_vars=[\"total_dose_1\", \"total_dose_2\"],\n var_name=\"category\",\n value_name=\"value\",\n ).dropna()\n\n rename = {\n \"total_dose_1\": \"Vaccinated with first dose\",\n \"total_dose_2\": \"Fully vaccinated\",\n }\n\n df[\"category\"] = df[\"category\"].replace(rename)\n\n chart = (\n alt.Chart(\n df,\n title=\"Number of people who received their first and second dose of a COVID-19 vaccine in Norway (Source: FHI)\",\n )\n .mark_area(line={}, opacity=0.3)\n .encode(\n x=alt.X(\"yearmonthdate(date):O\", axis=alt.Axis(title=None, labelAngle=-40)),\n y=alt.Y(\n \"value:Q\",\n stack=None,\n title=\"Number of people\",\n ),\n color=alt.Color(\n \"category:N\",\n scale=alt.Scale(\n domain=[\n \"Vaccinated with first dose\",\n \"Fully vaccinated\",\n ],\n 
range=[\"#5dade2\", \" #2ecc71\"],\n ),\n legend=alt.Legend(title=None),\n ),\n )\n .properties(width=1200, height=600)\n .configure_legend(\n strokeColor=\"gray\",\n fillColor=\"#FFFFFF\",\n labelFontSize=12,\n symbolStrokeWidth=2,\n symbolSize=160,\n padding=6,\n cornerRadius=5,\n direction=\"horizontal\",\n orient=\"none\",\n legendX=480,\n legendY=660,\n )\n )\n\n chart.save(filename)\n"
] | [
[
"pandas.read_csv",
"pandas.to_datetime",
"pandas.DatetimeIndex",
"pandas.date_range",
"pandas.melt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
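The graphs module above repeats one Altair pattern: a bar layer and a line layer share the x axis but get independent y scales, and new-case counts carry a shifted 7-day rolling mean. A minimal sketch of that pattern on a toy frame (column names and colors are illustrative; saving to PNG assumes a renderer such as vl-convert or altair_saver is installed).

```python
import altair as alt
import pandas as pd

# Toy frame standing in for the CSVs read by the functions above.
df = pd.DataFrame({
    "date": pd.date_range("2021-01-01", periods=14),
    "new": [3, 5, 4, 8, 7, 6, 9, 12, 10, 11, 9, 13, 14, 12],
})
df["total"] = df["new"].cumsum()
df["new_sma7"] = df["new"].rolling(window=7).mean().shift()  # trailing 7-day average

base = alt.Chart(df).encode(alt.X("yearmonthdate(date):O", axis=alt.Axis(title=None)))
bars = base.mark_bar(color="#FFD1D1").encode(y=alt.Y("new:Q", title="New per day"))
line = base.mark_line(color="#2E507B").encode(y=alt.Y("total:Q", title="Cumulative"))

# Independent y scales let daily counts and the cumulative curve share one plot.
chart = alt.layer(bars, line).resolve_scale(y="independent").properties(width=600, height=300)
chart.save("toy_chart.png")
```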
jnsrch/disentangling-vae-cwt | [
"0e927bdcd3d149cadb30aa107331f0c071138c41"
] | [
"disvae/training.py"
] | [
"import imageio\nimport logging\nimport os\nfrom timeit import default_timer\nfrom collections import defaultdict\nfrom utils.datasets import DATASETS_DICT\n\nfrom tqdm import trange\nimport torch\nfrom torch.nn import functional as F\n\nfrom disvae.utils.modelIO import save_model\n\n\nTRAIN_LOSSES_LOGFILE = \"train_losses.log\"\n\n\nclass Trainer():\n \"\"\"\n Class to handle training of model.\n\n Parameters\n ----------\n model: disvae.vae.VAE\n\n optimizer: torch.optim.Optimizer\n\n loss_f: disvae.models.BaseLoss\n Loss function.\n\n device: torch.device, optional\n Device on which to run the code.\n\n logger: logging.Logger, optional\n Logger.\n\n save_dir : str, optional\n Directory for saving logs.\n\n gif_visualizer : viz.Visualizer, optional\n Gif Visualizer that should return samples at every epochs.\n\n is_progress_bar: bool, optional\n Whether to use a progress bar for training.\n \"\"\"\n\n def __init__(self, model, optimizer, loss_f,\n device=torch.device(\"cpu\"),\n logger=logging.getLogger(__name__),\n save_dir=\"results\",\n gif_visualizer=None,\n is_progress_bar=True):\n\n self.device = device\n self.model = model.to(self.device)\n self.loss_f = loss_f\n self.optimizer = optimizer\n self.save_dir = save_dir\n self.is_progress_bar = is_progress_bar\n self.logger = logger\n self.losses_logger = LossesLogger(os.path.join(self.save_dir, TRAIN_LOSSES_LOGFILE))\n self.gif_visualizer = gif_visualizer\n self.logger.info(\"Training Device: {}\".format(self.device))\n\n def __call__(self, data_loader,\n epochs=10,\n checkpoint_every=10):\n \"\"\"\n Trains the model.\n\n Parameters\n ----------\n data_loader: torch.utils.data.DataLoader\n\n epochs: int, optional\n Number of epochs to train the model for.\n\n checkpoint_every: int, optional\n Save a checkpoint of the trained model every n epoch.\n \"\"\"\n start = default_timer()\n self.model.train()\n for epoch in range(epochs):\n storer = defaultdict(list)\n mean_epoch_loss = self._train_epoch(data_loader, storer, epoch)\n self.logger.info('Epoch: {} Average loss per image: {:.2f}'.format(epoch + 1,\n mean_epoch_loss))\n self.losses_logger.log(epoch, storer)\n\n if self.gif_visualizer is not None:\n self.gif_visualizer()\n\n if epoch % checkpoint_every == 0:\n save_model(self.model, self.save_dir,\n filename=\"model-{}.pt\".format(epoch))\n\n if self.gif_visualizer is not None:\n self.gif_visualizer.save_reset()\n\n self.model.eval()\n\n delta_time = (default_timer() - start) / 60\n self.logger.info('Finished training after {:.1f} min.'.format(delta_time))\n\n def _train_epoch(self, data_loader, storer, epoch):\n \"\"\"\n Trains the model for one epoch.\n\n Parameters\n ----------\n data_loader: torch.utils.data.DataLoader\n\n storer: dict\n Dictionary in which to store important variables for vizualisation.\n\n epoch: int\n Epoch number\n\n Return\n ------\n mean_epoch_loss: float\n Mean loss per image\n \"\"\"\n epoch_loss = 0.\n kwargs = dict(desc=\"Epoch {}\".format(epoch + 1), leave=False,\n disable=not self.is_progress_bar)\n with trange(len(data_loader), **kwargs) as t:\n for _, data in enumerate(data_loader):\n data = data[0] # Tensors are nested in dataset\n iter_loss = self._train_iteration(data, storer)\n epoch_loss += iter_loss\n\n t.set_postfix(loss=iter_loss)\n t.update()\n\n mean_epoch_loss = epoch_loss / len(data_loader)\n return mean_epoch_loss\n\n def _train_iteration(self, data, storer):\n \"\"\"\n Trains the model for one iteration on a batch of data.\n\n Parameters\n ----------\n data: torch.Tensor\n A batch of 
data. Shape : (batch_size, channel, height, width).\n\n storer: dict\n Dictionary in which to store important variables for vizualisation.\n \"\"\"\n\n data = data.to(self.device)\n\n try:\n recon_batch, latent_dist, latent_sample = self.model(data)\n loss = self.loss_f(data, recon_batch, latent_dist, self.model.training,\n storer, latent_sample=latent_sample)\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n except ValueError:\n # for losses that use multiple optimizers (e.g. Factor)\n loss = self.loss_f.call_optimize(data, self.model, self.optimizer, storer)\n\n return loss.item()\n\n\nclass LossesLogger(object):\n \"\"\"Class definition for objects to write data to log files in a\n form which is then easy to be plotted.\n \"\"\"\n\n def __init__(self, file_path_name):\n \"\"\" Create a logger to store information for plotting. \"\"\"\n if os.path.isfile(file_path_name):\n os.remove(file_path_name)\n\n self.logger = logging.getLogger(\"losses_logger\")\n self.logger.setLevel(1) # always store\n file_handler = logging.FileHandler(file_path_name)\n file_handler.setLevel(1)\n self.logger.addHandler(file_handler)\n\n header = \",\".join([\"Epoch\", \"Loss\", \"Value\"])\n self.logger.debug(header)\n\n def log(self, epoch, losses_storer):\n \"\"\"Write to the log file \"\"\"\n for k, v in losses_storer.items():\n log_string = \",\".join(str(item) for item in [epoch, k, mean(v)])\n self.logger.debug(log_string)\n\n\n# HELPERS\ndef mean(l):\n \"\"\"Compute the mean of a list\"\"\"\n return sum(l) / len(l)\n"
] | [
[
"torch.device"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
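A compact sketch of the optimize step inside Trainer._train_iteration above, using a toy linear model and MSE loss in place of the VAE and its loss function (the ValueError fallback for multi-optimizer losses such as Factor is omitted).

```python
import torch
import torch.nn as nn

# Tiny stand-ins so the loop runs end to end.
model = nn.Linear(4, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
loss_f = nn.MSELoss()

def train_iteration(data, target):
    # One optimization step: forward, zero grads, backward, update.
    recon = model(data)
    loss = loss_f(recon, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()

epoch_loss = 0.0
batches = [(torch.randn(8, 4), torch.randn(8, 1)) for _ in range(10)]
for data, target in batches:
    epoch_loss += train_iteration(data, target)
print("mean loss per batch:", epoch_loss / len(batches))
```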
alaaib/NetworkAnalysis | [
"bf45d616b3a2f40cec3879515fe8ecdbe19b0537"
] | [
"ClusterGivenGraph/main.py"
] | [
"import datetime\nimport sys\nimport time\nimport os\n\nfrom ClusterGivenGraph.Graph import Graph\nfrom ClusterGivenGraph.GraphHelper import get_graph_based_degree_sequence, create_motifs, export_to_pajek, \\\n motifs_main_calculation, calc_z_score, export_to_pajek_by_z_score\nimport networkx as nx\nfrom matplotlib import pyplot as plt\nimport statistics\n\nglob_i = 0\nglob_itrs = 0\nglob_req_time = ''\n\n\ndef print_progress_bar(prefix='', suffix='', decimals=1, length=50, fill='█'):\n global glob_i\n glob_i = glob_i + 1\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (glob_i / float(glob_itrs)))\n filled_length = int(length * glob_i // glob_itrs)\n bar = fill * filled_length + '-' * (length - filled_length)\n print('\\r%s |%s| %s%% %s ' % (prefix, bar, percent, suffix), end='\\r')\n # Print New Line on Complete\n if glob_i == glob_itrs:\n print()\n\n\ndef start_here(node_cnt, p_val, edge_cnt, init_scale, tmp_uid):\n # print(\"------------------- Started A New Iteration \" + str(tmp_uid) + \" ---------------------\")\n file_location = './Output/' + str(tmp_uid) + \"/\"\n if not os.path.exists(file_location):\n os.makedirs(file_location)\n print_progress_bar() # print(\"Create Scale-free Graph\")\n\n st_graph_obj = Graph(n=node_cnt, p=p_val, e=edge_cnt, init_scale=init_scale)\n st_graph_obj.create_scale_free_graph()\n st_nx_graph = st_graph_obj.get_graph()\n\n print_progress_bar() # print(\"Create Random Graph\")\n\n nd_graph_obj = Graph(n=node_cnt, p=p_val, e=len(st_nx_graph.edges))\n nd_graph_obj.create_random_graph()\n nd_nx_graph = nd_graph_obj.get_graph()\n\n print_progress_bar() # print(\"Join Both Graph\")\n\n rd_nx_graph = st_graph_obj.compose_graph(nd_nx_graph)\n\n rd_nx_degree_sequence = [d for n, d in rd_nx_graph.degree()]\n\n th_nx_graph = get_graph_based_degree_sequence(rd_nx_degree_sequence, file_location)\n print_progress_bar() # print(\"Write Graphs to files\")\n\n nx.write_pajek(st_nx_graph, file_location + \"random.net\")\n nx.write_pajek(nd_nx_graph, file_location + \"scale-free.net\")\n nx.write_pajek(th_nx_graph,\n file_location + \"randomForMotifs.net\")\n rd_nx_graph_nl_lst = export_to_pajek(rd_nx_graph, st_graph_obj.n)\n\n with open(file_location + 'combinedFixed.net', 'w') as f:\n for item in rd_nx_graph_nl_lst:\n f.write(\"%s\\n\" % item)\n\n print_progress_bar() # print(\"Create Motifs\")\n motifs = create_motifs(init_scale=init_scale, p_val=p_val, file_location=file_location)\n print_progress_bar() # print(\"Calculate Similarity \")\n motifs_result = motifs_main_calculation(motifs, rd_nx_graph, th_nx_graph, file_location)\n print_progress_bar() # print(\"Calculate Z-Score\")\n z_score_result = calc_z_score(motifs_result, tmp_uid, file_location)\n max_z_score = 0\n max_z_score_key = None\n for tmp_k in z_score_result:\n if z_score_result[tmp_k] > max_z_score:\n max_z_score = z_score_result[tmp_k]\n max_z_score_key = tmp_k\n\n rd_nx_graph_nl_lst = export_to_pajek_by_z_score(rd_nx_graph, st_graph_obj.n,\n motifs_result[\"relGraph\"][max_z_score_key],\n file_location)\n sf_good_separate_cnt = 0\n rnd_good_separate_cnt = 0\n sf_bad_separate_cnt = 0\n rnd_bad_separate_cnt = 0\n with open(file_location + 'combinedFixed.net', 'w') as f:\n for item in rd_nx_graph_nl_lst:\n if \"ellipse\" in item:\n if \"Black\" in item:\n sf_good_separate_cnt = sf_good_separate_cnt + 1\n else:\n sf_bad_separate_cnt = sf_bad_separate_cnt + 1\n elif \"box\" in item:\n if \"Black\" in item:\n rnd_bad_separate_cnt = rnd_bad_separate_cnt + 1\n else:\n rnd_good_separate_cnt = 
rnd_good_separate_cnt + 1\n\n f.write(\"%s\\n\" % item)\n\n y_data = [sf_good_separate_cnt / node_cnt,\n sf_bad_separate_cnt / node_cnt,\n rnd_good_separate_cnt / node_cnt,\n rnd_bad_separate_cnt / node_cnt]\n\n x_data = [\"Correct S-F\", \"Incorrect S-F\", \"Correct Rand\", \"Incorrect Rand\"]\n bar_obj = plt.bar(x_data, y_data)\n bar_obj[0].set_color('g')\n bar_obj[1].set_color('r')\n bar_obj[2].set_color('g')\n bar_obj[3].set_color('r')\n for xy_data in zip(x_data, y_data):\n plt.annotate('{:.0%}'.format(xy_data[1]), xy=xy_data, textcoords='data')\n plt.savefig(file_location + \"Bar-\" + str(tmp_uid) + \".png\", dpi=300)\n plt.clf()\n plt.cla()\n # print(\"---------------------------- End Iteration ------------------------\\n\\n\\n\\n\")\n return y_data\n\n\nif __name__ == \"__main__\":\n node_cnt_m = 504\n p_val_m = 0.01\n edge_cnt_m = 1000\n init_scale_m = 10\n args = []\n for arg in sys.argv[1:]:\n args.append(arg)\n if not args:\n print(\"Could not Find argument\")\n exit(1)\n try:\n args[0] = int(args[0])\n except ValueError:\n print(\"Argument must be an integer \")\n print(\"Usage : main.py X (Where X is an integer)\")\n exit(2)\n\n start = time.time()\n\n if not os.path.exists(\"./Output\"):\n os.makedirs(\"./Output\")\n results = dict()\n run_rng = range(args[0])\n\n glob_itrs = len(run_rng) * 7 + 1\n glob_req_time = str(datetime.timedelta(seconds=20 * (len(run_rng))))\n print(\"Estimated Time To Complete - \" + glob_req_time)\n for i in run_rng:\n tmp = start_here(node_cnt_m, p_val_m, edge_cnt_m, init_scale_m, i + 1)\n results[i + 1] = tmp\n\n sf_good_separate_cnt_m = []\n sf_bad_separate_cnt_m = []\n rnd_good_separate_cnt_m = []\n rnd_bad_separate_cnt_m = []\n\n for k in results:\n sf_good_separate_cnt_m.append(results[k][0])\n sf_bad_separate_cnt_m.append(results[k][1])\n rnd_good_separate_cnt_m.append(results[k][2])\n rnd_bad_separate_cnt_m.append(results[k][3])\n\n y = [statistics.mean(sf_good_separate_cnt_m),\n statistics.mean(sf_bad_separate_cnt_m),\n statistics.mean(rnd_good_separate_cnt_m),\n statistics.mean(rnd_bad_separate_cnt_m)]\n x = [\"Correct S-F\", \"Incorrect S-F\", \"Correct Rand\", \"Incorrect Rand\"]\n\n barObj_m = plt.bar(x, y)\n barObj_m[0].set_color('g')\n barObj_m[1].set_color('r')\n barObj_m[2].set_color('g')\n barObj_m[3].set_color('r')\n for xy in zip(x, y):\n plt.annotate('{:.0%}'.format(xy[1]), xy=xy, textcoords='data')\n plt.savefig(\"./Output/Bar.png\", dpi=300)\n print_progress_bar()\n end = time.time()\n print(\"---------------- executing time --------------\")\n print(end - start)\n print(\"---------------- executing time --------------\")\n"
] | [
[
"matplotlib.pyplot.cla",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.bar"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
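The progress bar in the record above tracks its position through module globals (glob_i, glob_itrs). A stateless sketch of the same fill logic, with the counter passed in explicitly instead:

```python
def progress_bar(i, total, prefix="", suffix="", decimals=1, length=50, fill="█"):
    # Same percent / fill arithmetic as print_progress_bar above, minus globals.
    percent = ("{0:." + str(decimals) + "f}").format(100 * (i / float(total)))
    filled = int(length * i // total)
    bar = fill * filled + "-" * (length - filled)
    print("\r%s |%s| %s%% %s" % (prefix, bar, percent, suffix), end="\r")
    if i == total:
        print()  # move to a new line once complete

for step in range(1, 101):
    progress_bar(step, 100, prefix="Progress:", suffix="complete")
```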
lidq92/MDTVSFA | [
"22f49a9c1b2faec4a643c92b0f6b69297f4e4121",
"22f49a9c1b2faec4a643c92b0f6b69297f4e4121"
] | [
"VQAloss.py",
"VQAdataset.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass VQALoss(nn.Module):\n def __init__(self, scale, loss_type='mixed', m=None):\n super(VQALoss, self).__init__()\n self.loss_type = loss_type\n self.scale = scale\n self.m = m #\n\n def forward(self, y_pred, y):\n relative_score, mapped_score, aligned_score = y_pred\n if self.loss_type == 'mixed':\n loss = [loss_a(mapped_score[d], y[d]) + loss_m(relative_score[d], y[d]) +\n F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]\n elif self.loss_type == 'correlation' or self.loss_type == 'rank+plcc':\n loss = [loss_a(mapped_score[d], y[d]) + loss_m(relative_score[d], y[d]) for d in range(len(y))]\n elif self.loss_type == 'rank':\n loss = [loss_m(relative_score[d], y[d]) for d in range(len(y))]\n elif self.loss_type == 'plcc':\n loss = [loss_a(mapped_score[d], y[d]) for d in range(len(y))]\n elif self.loss_type == 'rank+l1':\n loss = [loss_m(relative_score[d], y[d]) + F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y)) for d in range(len(y))]\n elif self.loss_type == 'plcc+l1':\n loss = [loss_a(relative_score[d], y[d]) + F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y)) for d in range(len(y))]\n elif 'naive' in self.loss_type:\n aligned_scores = torch.cat([(aligned_score[d]-self.m[d])/self.scale[d] for d in range(len(y))])\n ys = torch.cat([(y[d]-self.m[d])/self.scale[d] for d in range(len(y))])\n if self.loss_type == 'naive0':\n return F.l1_loss(aligned_scores, ys) # \n return loss_a(aligned_scores, ys) + loss_m(aligned_scores, ys) + F.l1_loss(aligned_scores, ys)\n else: # default l1\n loss = [F.l1_loss(aligned_score[d], y[d]) / self.scale[d] for d in range(len(y))]\n # print(loss)\n # sum_loss = sum([lossi for lossi in loss]) / len(loss)\n # sum_loss = len(loss) / sum([1 / lossi for lossi in loss])\n sum_loss = sum([torch.exp(lossi) * lossi for lossi in loss]) / sum([torch.exp(lossi) for lossi in loss])\n return sum_loss\n\n\ndef loss_m(y_pred, y):\n \"\"\"prediction monotonicity related loss\"\"\"\n assert y_pred.size(0) > 1 #\n return torch.sum(F.relu((y_pred-y_pred.t()) * torch.sign((y.t()-y)))) / y_pred.size(0) / (y_pred.size(0)-1)\n\n\ndef loss_a(y_pred, y):\n \"\"\"prediction accuracy related loss\"\"\"\n assert y_pred.size(0) > 1 #\n return (1 - torch.cosine_similarity(y_pred.t() - torch.mean(y_pred), y.t() - torch.mean(y))[0]) / 2\n\n",
"import h5py\nimport torch\nfrom torch.utils.data import Dataset\nimport numpy as np\n\n\nclass VQADataset(Dataset):\n def __init__(self, args, datasets, status='train'):\n self.status = status\n self.datasets = datasets\n self.crop_length = args.crop_length\n\n max_len = dict()\n self.M = dict()\n self.m = dict()\n self.scale = dict()\n self.index = dict()\n\n for dataset in datasets:\n Info = h5py.File(args.data_info[dataset], 'r')\n max_len[dataset] = int(Info['max_len'][0])\n\n self.M[dataset] = Info['scores'][0, :].max()\n self.m[dataset] = Info['scores'][0, :].min()\n self.scale[dataset] = self.M[dataset] - self.m[dataset]\n\n index = Info['index']\n index = index[:, args.exp_id % index.shape[1]]\n ref_ids = Info['ref_ids'][0, :]\n if status == 'train':\n index = index[0:int(args.train_proportion * args.train_ratio * len(index))]\n elif status == 'val':\n index = index[int(args.train_ratio * len(index)):int((0.5 + args.train_ratio / 2) * len(index))]\n elif status == 'test':\n index = index[int((0.5 + args.train_ratio / 2) * len(index)):len(index)]\n self.index[dataset] = []\n for i in range(len(ref_ids)):\n if ref_ids[i] in index:\n self.index[dataset].append(i)\n print(\"# {} images from {}: {}\".format(status, dataset, len(self.index[dataset])))\n print(\"Ref Index: \")\n print(index.astype(int))\n\n max_len_all = max(max_len.values())\n self.features, self.length, self.label, self.KCL, self.N = dict(), dict(), dict(), dict(), dict()\n for dataset in datasets:\n N = len(self.index[dataset])\n self.N[dataset] = N\n self.features[dataset] = np.zeros((N, max_len_all, args.feat_dim), dtype=np.float32)\n self.length[dataset] = np.zeros(N, dtype=np.int)\n self.label[dataset] = np.zeros((N, 1), dtype=np.float32)\n self.KCL[dataset] = []\n for i in range(N):\n features = np.load(args.features_dir[dataset] + str(self.index[dataset][i]) + '_' + args.feature_extractor +'_last_conv.npy')\n self.length[dataset][i] = features.shape[0]\n self.features[dataset][i, :features.shape[0], :] = features\n mos = np.load(args.features_dir[dataset] + str(self.index[dataset][i]) + '_score.npy') #\n self.label[dataset][i] = mos\n self.KCL[dataset].append(dataset)\n\n def __len__(self):\n return max(self.N.values())\n\n def __getitem__(self, idx):\n data = [(self.features[dataset][idx % self.N[dataset]],\n self.length[dataset][idx % self.N[dataset]],\n self.KCL[dataset][idx % self.N[dataset]]) for dataset in self.datasets]\n label = [self.label[dataset][idx % self.N[dataset]] for dataset in self.datasets]\n return data, label\n\n\ndef get_data_loaders(args):\n \"\"\" Prepare the train-val-test data\n :param args: related arguments\n :return: train_loader, val_loader, test_loader\n \"\"\"\n train_dataset = VQADataset(args, args.datasets['train'], 'train')\n train_loader = torch.utils.data.DataLoader(train_dataset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2,\n drop_last=True) #\n\n scale = train_dataset.scale\n m = train_dataset.m\n\n val_loader, test_loader = dict(), dict()\n for dataset in args.datasets['val']:\n val_dataset = VQADataset(args, [dataset], 'val')\n val_loader[dataset] = torch.utils.data.DataLoader(val_dataset)\n\n for dataset in args.datasets['test']:\n test_dataset = VQADataset(args, [dataset], 'test')\n if dataset not in args.datasets['train']:\n scale[dataset] = test_dataset.scale[dataset]\n m[dataset] = test_dataset.m[dataset]\n test_loader[dataset] = torch.utils.data.DataLoader(test_dataset)\n\n return train_loader, val_loader, test_loader, scale, m\n"
] | [
[
"torch.exp",
"torch.mean",
"torch.nn.functional.l1_loss"
],
[
"torch.utils.data.DataLoader",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
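The two criteria combined by VQALoss above can be exercised in isolation. A sketch of the pairwise monotonicity (rank) loss and the correlation (PLCC-style) accuracy loss on toy column vectors; the values in the comments are approximate.

```python
import torch
import torch.nn.functional as F

def loss_m(y_pred, y):
    # Pairwise rank loss: penalize pairs whose predicted order contradicts y.
    n = y_pred.size(0)
    return torch.sum(F.relu((y_pred - y_pred.t()) * torch.sign(y.t() - y))) / n / (n - 1)

def loss_a(y_pred, y):
    # 1 minus the linear correlation of mean-centered vectors, rescaled to [0, 1].
    return (1 - torch.cosine_similarity(y_pred.t() - torch.mean(y_pred),
                                        y.t() - torch.mean(y))[0]) / 2

y = torch.tensor([[1.0], [2.0], [3.0], [4.0]])        # column vectors, as in the dataset code
y_pred = torch.tensor([[1.1], [1.9], [3.2], [3.8]])
print(loss_m(y_pred, y).item())  # ~0: every pair is correctly ordered
print(loss_a(y_pred, y).item())  # ~0: near-perfect linear correlation
```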
7FM/OpenRadar | [
"d90eea23feb062830dd71b00064f06f70ba6783c",
"d90eea23feb062830dd71b00064f06f70ba6783c"
] | [
"mmwave/dsp/doppler_processing.py",
"mmwave/dsp/cfar.py"
] | [
"# Copyright 2019 The OpenRadar Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nfrom numba import njit, jit\nfrom . import compensation\nfrom . import utils\n\n\ndef doppler_resolution(band_width, start_freq_const=77, ramp_end_time=62, idle_time_const=100, num_loops_per_frame=128,\n num_tx_antennas=3):\n \"\"\"Calculate the doppler resolution for the given radar configuration.\n\n Args:\n start_freq_const (float): Frequency chirp starting point.\n ramp_end_time (float): Frequency chirp end point.\n idle_time_const (int): Idle time between chirps.\n band_width (float): Radar config bandwidth.\n num_loops_per_frame (int): The number of loops in each frame.\n num_tx_antennas (int): The number of transmitting antennas (tx) on the radar.\n\n Returns:\n doppler_resolution (float): The doppler resolution for the given radar configuration.\n\n \"\"\"\n\n light_speed_meter_per_sec = 299792458\n\n center_frequency = start_freq_const * 1e9 + band_width / 2\n chirp_interval = (ramp_end_time + idle_time_const) * 1e-6\n doppler_resolution = light_speed_meter_per_sec / (\n 2 * num_loops_per_frame * num_tx_antennas * center_frequency * chirp_interval)\n\n return doppler_resolution\n\ndef separate_tx(signal, num_tx, vx_axis=1, axis=0):\n \"\"\"Separate interleaved radar data from separate TX along a certain axis to account for TDM radars.\n\n Args:\n signal (ndarray): Received signal.\n num_tx (int): Number of transmit antennas.\n vx_axis (int): Axis in which to accumulate the separated data.\n axis (int): Axis in which the data is interleaved.\n\n Returns:\n ndarray: Separated received data in the\n\n \"\"\"\n # Reorder the axes\n reordering = np.arange(len(signal.shape))\n reordering[0] = axis\n reordering[axis] = 0\n if not (reordering == np.arange(len(reordering))).all(): # check if has to reorder\n signal = signal.transpose(reordering)\n\n # if signal.shape[1] != num_tx * signal.shape[1]:\n # pass\n\n out = np.concatenate([signal[i::num_tx, ...] for i in range(num_tx)], axis=vx_axis)\n\n return out.transpose(reordering)\n\n\ndef doppler_processing(radar_cube,\n num_tx_antennas=2,\n clutter_removal_enabled=False,\n interleaved=True,\n window_type_2d=None,\n accumulate=True):\n \"\"\"Perform 2D FFT on the radar_cube.\n\n Interleave the radar_cube, perform optional windowing and 2D FFT on the radar_cube. Optional antenna couping\n signature removal can also be performed right before 2D FFT. In constrast to the original TI codes, CFAR and peak\n grouping are intentionally separated with 2D FFT for the easiness of debugging.\n\n Args:\n radar_cube (ndarray): Output of the 1D FFT. If not interleaved beforehand, it has the shape of\n (numChirpsPerFrame, numRxAntennas, numRangeBins). Otherwise, it has the shape of \n (numRangeBins, numVirtualAntennas, num_doppler_bins). 
It is assumed that after interleaving the doppler\n dimension is located at the last axis.\n num_tx_antennas (int): Number of transmitter antennas. This affects how interleaving is performed.\n clutter_removal_enabled (boolean): Flag to enable naive clutter removal.\n interleaved (boolean): If the input radar_cube is interleaved before passing in. The default radar_cube is not\n interleaved, i.e. has the shape of (numChirpsPerFrame, numRxAntennas, numRangeBins). The interleaving\n process will transform it such that it becomes (numRangeBins, numVirtualAntennas, num_doppler_bins). Note\n that this interleaving is only applicable to TDM radar, i.e. each tx emits the chirp sequentially.\n window_type_2d (mmwave.dsp.utils.Window): Optional windowing type before doppler FFT.\n accumulate (boolean): Flag to reduce the numVirtualAntennas dimension.\n \n Returns:\n detMatrix (ndarray): (numRangeBins, num_doppler_bins) complete range-dopper information. Original datatype is\n uint16_t. Note that azimuthStaticHeatMap can be extracted from zero-doppler index for\n visualization.\n aoa_input (ndarray): (numRangeBins, numVirtualAntennas, num_doppler_bins) ADC data reorganized by vrx instead of\n physical rx.\n \"\"\"\n\n if interleaved:\n # radar_cube is interleaved in the first dimension (for 2 tx and 0-based indexing, odd are the chirps from tx1,\n # and even are from tx2) so it becomes ( , numVirtualAntennas, numADCSamples), where \n # numChirpsPerFrame = num_doppler_bins * num_tx_antennas as designed.\n # Antennas associated to tx1 (Ping) are 0:4 and to tx2 (Pong) are 5:8.\n fft2d_in = separate_tx(radar_cube, num_tx_antennas, vx_axis=1, axis=0)\n else:\n fft2d_in = radar_cube\n \n # (Optional) Static Clutter Removal\n if clutter_removal_enabled:\n # fft2d_in = compensation.clutter_removal(fft2d_in, axis=0)\n fft2d_in[1:] = compensation.clutter_removal(fft2d_in[1:], axis=0) # TODO this or above with static detection removal\n\n # transpose to (numRangeBins, numVirtualAntennas, num_doppler_bins)\n fft2d_in = np.transpose(fft2d_in, axes=(2, 1, 0))\n\n # Windowing 16x32\n if window_type_2d:\n fft2d_in = utils.windowing(fft2d_in, window_type_2d, axis=2)\n\n # It is assumed that doppler is at the last axis.\n # FFT 32x32\n fft2d_out = np.fft.fft(fft2d_in)\n aoa_input = fft2d_out\n\n # Save zero-Doppler as azimuthStaticHeatMap, watch out for the bit shift in\n # original code.\n\n # Log_2 Absolute Value\n fft2d_log_abs = np.log2(np.abs(fft2d_out))\n\n # Accumulate\n if accumulate:\n return np.sum(fft2d_log_abs, axis=1), aoa_input # TODO divide by num_rx?\n else:\n return fft2d_log_abs, aoa_input\n\n\ndef doppler_estimation(radar_cube,\n beam_weights,\n num_tx_antennas=2,\n clutter_removal_enabled=False,\n interleaved=False,\n window_type_2d=None):\n \"\"\"Perform doppler estimation on the weighted sum of range FFT output across all virtual antennas.\n \n In contrast to directly computing doppler FFT from the output of range FFT, this function combines it across all \n the virtual receivers first using the weights generated from beamforming. Then FFT is performed and argmax is taken\n across each doppler axis to return the indices of max doppler values.\n \n Args:\n radar_cube (ndarray): Output of the 1D FFT with only ranges on detected objects. If not interleaved beforehand,\n it has the shape of (numChirpsPerFrame, numRxAntennas, numDetObjs). Otherwise, it has the shape of \n (numDetObjs, numVirtualAntennas, num_doppler_bins). 
It is assumed that after interleaving the doppler\n dimension is located at the last axis.\n beam_weights (ndarray): Weights to sum up the radar_cube across the virtual receivers. It is from the\n beam-forming and has the shape of (numVirtualAntennas, numDetObjs)\n num_tx_antennas (int): Number of transmitter antennas. This affects how interleaving is performed.\n clutter_removal_enabled (boolean): Flag to enable naive clutter removal.\n interleaved (boolean): If the input radar_cube is interleaved before passing in. The default radar_cube is not\n interleaved, i.e. has the shape of (numChirpsPerFrame, numRxAntennas, numDetObjs). The interleaveing process\n will transform it such that it becomes (numDetObjs, numVirtualAntennas, num_doppler_bins). Note that this\n interleaving is only appliable to TDM radar, i.e. each tx emits the chirp sequentially.\n window_type_2d (string): Optional windowing type before doppler FFT.\n \n Returns:\n doppler_est (ndarray): (numDetObjs) Doppler index for each detected objects. Positive index means moving away\n from radar while negative index means moving towards the radar.\n \"\"\"\n fft2d_in = None\n if not interleaved:\n num_doppler_bins = radar_cube.shape[0] / num_tx_antennas\n # radar_cube is interleaved in the first dimension (for 2 tx and 0-based indexing, odd are the chirps from tx1,\n # and even are from tx2) so it becomes (num_doppler_bins, numVirtualAntennas, numADCSamples), where\n # numChirpsPerFrame = num_doppler_bins * num_tx_antennas as designed.\n # Antennas associated to tx1 (Ping) are 0:4 and to tx2 (Pong) are 5:8.\n if num_tx_antennas == 2:\n fft2d_in = np.concatenate((radar_cube[0::2, ...], radar_cube[1::2, ...]), axis=1)\n elif num_tx_antennas == 3:\n fft2d_in = np.concatenate((radar_cube[0::3, ...], radar_cube[1::3, ...], radar_cube[2::3, ...]), axis=1)\n\n # transpose to (numRangeBins, numVirtualAntennas, num_doppler_bins)\n fft2d_in = np.transpose(fft2d_in, axes=(2, 1, 0))\n else:\n num_doppler_bins = radar_cube.shape[2]\n\n # (Optional) Static Clutter Removal\n if clutter_removal_enabled:\n fft2d_in = compensation.clutter_removal(fft2d_in)\n\n # Weighted sum across all virtual receivers.\n fft2d_in = np.einsum('ijk,jk->ik', fft2d_in, beam_weights)\n\n # Windowing 16x32\n if window_type_2d:\n fft2d_in = utils.windowing(fft2d_in, window_type_2d, axis=1)\n\n # It is assumed that doppler is at the last axis.\n # FFT 32x32\n doppler_est = np.fft.fft(fft2d_in)\n doppler_est = np.argmax(doppler_est, axis=1)\n doppler_est[doppler_est[:] >= num_doppler_bins] -= num_doppler_bins * 2\n\n return doppler_est\n",
"# Copyright 2019 The OpenRadar Authors. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport numpy as np\nfrom scipy.ndimage import convolve1d\n\n\"\"\" Various cfar algorithm types\n\nFrom https://www.mathworks.com/help/phased/ug/constant-false-alarm-rate-cfar-detectors.html\n|-----------------------------------------------------------------------------------------------------------|\n| Algorithm | Typical Usage |\n|-----------------------------------------------------------------------------------------------------------|\n| Cell-averaging CFAR | Most situations |\n| Greatest-of cell-averaging CFAR | When it is important to avoid false alarms at the edge of clutter |\n| Smallest-of cell-averaging CFAR | When targets are closely located |\n| Order statistic CFAR | Compromise between greatest-of and smallest-of cell averaging |\n|-----------------------------------------------------------------------------------------------------------|\n\n\"\"\"\n\n\ndef ca(x, *argv, **kwargs):\n \"\"\"Detects peaks in signal using Cell-Averaging CFAR (CA-CFAR).\n\n Args:\n x (~numpy.ndarray): Signal.\n *argv: See mmwave.dsp.cfar.ca\\_\n **kwargs: See mmwave.dsp.cfar.ca\\_\n\n Returns:\n ~numpy.ndarray: Boolean array of detected peaks in x.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> det = mm.dsp.ca(signal, l_bound=20, guard_len=1, noise_len=3)\n >>> det\n array([False, False, True, False, False, False, False, True, False,\n True])\n\n Perform a non-wrapping CFAR\n\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> det = mm.dsp.ca(signal, l_bound=20, guard_len=1, noise_len=3, mode='constant')\n >>> det\n array([False, True, True, False, False, False, False, True, True,\n True])\n\n \"\"\"\n if isinstance(x, list):\n x = np.array(x)\n threshold, _ = ca_(x, *argv, **kwargs)\n ret = (x > threshold)\n return ret\n\n\nca_kernel = None\n\n\ndef ca_(x, guard_len=4, noise_len=8, mode='wrap', l_bound=4000, new_kernel=False):\n \"\"\"Uses Cell-Averaging CFAR (CA-CFAR) to calculate a threshold that can be used to calculate peaks in a signal.\n\n Args:\n x (~numpy.ndarray): Signal.\n guard_len (int): Number of samples adjacent to the CUT that are ignored.\n noise_len (int): Number of samples adjacent to the guard padding that are factored into the calculation.\n mode (str): Specify how to deal with edge cells. Examples include 'wrap' and 'constant'.\n l_bound (float or int): Additive lower bound while calculating peak threshold.\n\n Returns:\n Tuple [ndarray, ndarray]\n 1. (ndarray): Upper bound of noise threshold.\n #. 
(ndarray): Raw noise strength.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> threshold = mm.dsp.ca_(signal, l_bound=20, guard_len=1, noise_len=3)\n >>> threshold\n (array([70, 76, 64, 79, 81, 91, 74, 71, 70, 79]), array([50, 56, 44, 59, 61, 71, 54, 51, 50, 59]))\n\n Perform a non-wrapping CFAR thresholding\n\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> threshold = mm.dsp.ca_(signal, l_bound=20, guard_len=1, noise_len=3, mode='constant')\n >>> threshold\n (array([44, 37, 41, 65, 81, 91, 67, 51, 34, 46]), array([24, 17, 21, 45, 61, 71, 47, 31, 14, 26]))\n\n \"\"\"\n global ca_kernel\n if isinstance(x, list):\n x = np.array(x)\n assert type(x) == np.ndarray\n\n if ca_kernel is None or new_kernel:\n ca_kernel = np.ones(1 + (2 * guard_len) + (2 * noise_len), dtype=x.dtype) / (2 * noise_len)\n ca_kernel[noise_len:noise_len + (2 * guard_len) + 1] = 0\n\n noise_floor = convolve1d(x, ca_kernel, mode=mode)\n threshold = noise_floor + l_bound\n\n return threshold, noise_floor\n\n\ndef caso(x, *argv, **kwargs):\n \"\"\"Detects peaks in signal using Cell-Averaging Smallest-Of CFAR (CASO-CFAR).\n\n Args:\n x (~numpy.ndarray): Signal.\n *argv: See mmwave.dsp.cfar.caso\\_\n **kwargs: See mmwave.dsp.cfar.caso\\_\n\n Returns:\n ~numpy.ndarray: Boolean array of detected peaks in x.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> det = mm.dsp.caso(signal, l_bound=20, guard_len=1, noise_len=3)\n >>> det\n array([False, False, True, False, False, False, False, True, True,\n True])\n\n Perform a non-wrapping CFAR\n\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> det = mm.dsp.caso(signal, l_bound=20, guard_len=1, noise_len=3, mode='constant')\n >>> det\n array([False, True, True, False, False, False, False, True, True,\n True])\n\n \"\"\"\n if isinstance(x, list):\n x = np.array(x)\n threshold, _ = caso_(x, *argv, **kwargs)\n ret = (x > threshold)\n return ret\n\n\ndef caso_(x, guard_len=4, noise_len=8, mode='wrap', l_bound=4000):\n \"\"\"Uses Cell-Averaging Smallest-Of CFAR (CASO-CFAR) to calculate a threshold that can be used to calculate peaks in a signal.\n\n Args:\n x (~numpy.ndarray): Signal.\n guard_len (int): Number of samples adjacent to the CUT that are ignored.\n noise_len (int): Number of samples adjacent to the guard padding that are factored into the calculation.\n mode (str): Specify how to deal with edge cells.\n l_bound (float or int): Additive lower bound while calculating peak threshold.\n\n Returns:\n Tuple [ndarray, ndarray]\n 1. (ndarray): Upper bound of noise threshold.\n #. 
(ndarray): Raw noise strength.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> threshold = mm.dsp.caso_(signal, l_bound=20, guard_len=1, noise_len=3)\n >>> (threshold[0].astype(int), threshold[1].astype(int))\n (array([69, 55, 49, 72, 72, 86, 69, 55, 49, 72]), array([49, 35, 29, 52, 52, 66, 49, 35, 29, 52]))\n\n Perform a non-wrapping CFAR thresholding\n\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> threshold = mm.dsp.caso_(signal, l_bound=20, guard_len=1, noise_len=3, mode='constant')\n >>> (threshold[0].astype(int), threshold[1].astype(int))\n (array([69, 55, 49, 72, 72, 86, 69, 55, 49, 72]), array([49, 35, 29, 52, 52, 66, 49, 35, 29, 52]))\n\n \"\"\"\n if isinstance(x, list):\n x = np.array(x)\n\n l_window, r_window = _cfar_windows(x, guard_len, noise_len, mode)\n\n # Generate scaling based on mode\n l_window = l_window / noise_len\n r_window = r_window / noise_len\n if mode == 'wrap':\n noise_floor = np.minimum(l_window, r_window)\n elif mode == 'constant':\n edge_cells = guard_len + noise_len\n noise_floor = np.minimum(l_window, r_window)\n noise_floor[:edge_cells] = r_window[:edge_cells]\n noise_floor[-edge_cells:] = l_window[-edge_cells:]\n else:\n raise ValueError(f'Mode {mode} is not a supported mode')\n\n threshold = noise_floor + l_bound\n return threshold, noise_floor\n\n\ndef cago(x, *argv, **kwargs):\n \"\"\"Detects peaks in signal using Cell-Averaging Greatest-Of CFAR (CAGO-CFAR).\n\n Args:\n x (~numpy.ndarray): Signal.\n *argv: See mmwave.dsp.cfar.cago\\_\n **kwargs: See mmwave.dsp.cfar.cago\\_\n\n Returns:\n ~numpy.ndarray: Boolean array of detected peaks in x.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> det = mm.dsp.cago(signal, l_bound=20, guard_len=1, noise_len=3)\n >>> det\n array([False, False, True, False, False, False, False, True, False,\n False])\n\n Perform a non-wrapping CFAR\n\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> det = mm.dsp.cago(signal, l_bound=20, guard_len=1, noise_len=3, mode='constant')\n >>> det\n array([False, True, True, False, False, False, False, True, True,\n True])\n\n \"\"\"\n if isinstance(x, list):\n x = np.array(x)\n threshold, _ = cago_(x, *argv, **kwargs)\n ret = (x > threshold)\n return ret\n\n\ndef cago_(x, guard_len=4, noise_len=8, mode='wrap', l_bound=4000):\n \"\"\"Uses Cell-Averaging Greatest-Of CFAR (CAGO-CFAR) to calculate a threshold that can be used to calculate peaks in a signal.\n\n Args:\n x (~numpy.ndarray): Signal.\n guard_len (int): Number of samples adjacent to the CUT that are ignored.\n noise_len (int): Number of samples adjacent to the guard padding that are factored into the calculation.\n mode (str): Specify how to deal with edge cells.\n l_bound (float or int): Additive lower bound while calculating peak threshold.\n\n Returns:\n Tuple [ndarray, ndarray]\n 1. (ndarray): Upper bound of noise threshold.\n #. 
(ndarray): Raw noise strength.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> threshold = mm.dsp.cago_(signal, l_bound=20, guard_len=1, noise_len=3)\n >>> (threshold[0].astype(int), threshold[1].astype(int))\n (array([72, 97, 80, 87, 90, 97, 80, 87, 90, 86]), array([52, 77, 60, 67, 70, 77, 60, 67, 70, 66]))\n\n Perform a non-wrapping CFAR thresholding\n\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> threshold = mm.dsp.cago_(signal, l_bound=20, guard_len=1, noise_len=3, mode='constant')\n >>> (threshold[0].astype(int), threshold[1].astype(int))\n (array([69, 55, 49, 72, 90, 97, 69, 55, 49, 72]), array([49, 35, 29, 52, 70, 77, 49, 35, 29, 52]))\n\n \"\"\"\n if isinstance(x, list):\n x = np.array(x)\n\n l_window, r_window = _cfar_windows(x, guard_len, noise_len, mode)\n\n # Generate scaling based on mode\n l_window = l_window / noise_len\n r_window = r_window / noise_len\n if mode == 'wrap':\n noise_floor = np.maximum(l_window, r_window)\n elif mode == 'constant':\n edge_cells = guard_len + noise_len\n noise_floor = np.maximum(l_window, r_window)\n noise_floor[:edge_cells] = r_window[:edge_cells]\n noise_floor[-edge_cells:] = l_window[-edge_cells:]\n else:\n raise ValueError(f'Mode {mode} is not a supported mode')\n\n threshold = noise_floor + l_bound\n return threshold, noise_floor\n\n\ndef os(x, *argv, **kwargs):\n \"\"\"Performs Ordered-Statistic CFAR (OS-CFAR) detection on the input array.\n\n Args:\n x (~numpy.ndarray): Noisy array to perform cfar on with log values\n *argv: See mmwave.dsp.cfar.os\\_\n **kwargs: See mmwave.dsp.cfar.os\\_\n\n\n Returns:\n ~numpy.ndarray: Boolean array of detected peaks in x.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> det = mm.dsp.os(signal, k=3, scale=1.1, guard_len=0, noise_len=3)\n >>> det\n array([False, True, True, False, False, False, False, True, False,\n True])\n\n \"\"\"\n if isinstance(x, list):\n x = np.array(x)\n threshold, _ = os_(x, *argv, **kwargs)\n ret = (x > threshold)\n return ret\n\n\ndef os_(x, guard_len=0, noise_len=8, k=12, scale=1.0):\n \"\"\"Performs Ordered-Statistic CFAR (OS-CFAR) detection on the input array.\n\n Args:\n x (~numpy.ndarray): Noisy array to perform cfar on with log values\n guard_len (int): Number of samples adjacent to the CUT that are ignored.\n noise_len (int): Number of samples adjacent to the guard padding that are factored into the calculation.\n k (int): Ordered statistic rank to sample from.\n scale (float): Scaling factor.\n\n Returns:\n Tuple [ndarray, ndarray]\n 1. (ndarray): Upper bound of noise threshold.\n #. 
(ndarray): Raw noise strength.\n\n Examples:\n >>> signal = np.random.randint(100, size=10)\n >>> signal\n array([41, 76, 95, 28, 25, 53, 10, 93, 54, 85])\n >>> threshold = mm.dsp.os_(signal, k=3, scale=1.1, guard_len=0, noise_len=3)\n >>> (threshold[0].astype(int), threshold[1].astype(int))\n (array([93, 59, 58, 58, 83, 59, 59, 58, 83, 83]), array([85, 54, 53, 53, 76, 54, 54, 53, 76, 76]))\n\n \"\"\"\n if isinstance(x, list):\n x = np.array(x, dtype=np.uint32)\n\n n = len(x)\n noise_floor = np.zeros(n)\n threshold = np.zeros(n, dtype=np.float32)\n cut_idx = -1\n\n # Initial CUT\n left_idx = list(np.arange(n - noise_len - guard_len - 1, n - guard_len - 1))\n right_idx = list(np.arange(guard_len, guard_len + noise_len))\n\n # All other CUTs\n while cut_idx < (n - 1):\n cut_idx += 1\n\n left_idx.pop(0)\n left_idx.append((cut_idx - 1) % n)\n\n right_idx.pop(0)\n right_idx.append((cut_idx + guard_len + noise_len) % n)\n\n window = np.concatenate((x[left_idx], x[right_idx]))\n window.partition(k)\n noise_floor[cut_idx] = window[k]\n threshold[cut_idx] = noise_floor[cut_idx] * scale\n\n return threshold, noise_floor\n\n\ndef _cfar_windows(x, guard_len, noise_len, mode):\n if type(x) != np.ndarray:\n raise TypeError(f'Expected array-like input got {type(x)}')\n\n # Create kernels\n r_kernel = np.zeros(1 + (2 * guard_len) + (2 * noise_len), dtype=x.dtype)\n r_kernel[:noise_len] = 1\n l_kernel = r_kernel[::-1]\n\n # Do initial convolutions\n l_window = convolve1d(x, l_kernel, mode=mode)\n r_window = convolve1d(x, r_kernel, mode=mode)\n\n return l_window, r_window\n\n\nWRAP_UP_LIST_IDX = lambda x, total: x if x >= 0 else x + total\nWRAP_DN_LIST_IDX = lambda x, total: x if x < total else x - total\nWRAP_DOPPLER_IDX = lambda x, num_doppler_bins: np.bitwise_and(x, num_doppler_bins - 1)\nDOPPLER_IDX_TO_SIGNED = lambda idx, fft_size: idx if idx < fft_size // 2 else idx - fft_size\n\n\ndef peak_grouping(obj_raw,\n det_matrix,\n num_doppler_bins,\n max_range_idx,\n min_range_idx,\n group_in_doppler_direction,\n group_in_range_direction):\n \"\"\"Performs peak grouping on detection Range/Doppler matrix.\n\n The function groups neighboring peaks into one. The grouping is done according to two input flags:\n group_in_doppler_direction and group_in_doppler_direction. For each detected peak the function checks if the peak is\n greater than its neighbors. If this is true, the peak is copied to the output list of detected objects. The\n neighboring peaks that are used for checking are taken from the detection matrix and copied into 3x3 kernel\n regardless of whether they are CFAR detected or not. Note: Function always reads 9 samples per detected object\n from L3 memory into local array tempBuff, but it only needs to read according to input flags. For example if only\n the group_in_doppler_direction flag is set, it only needs to read middle row of the kernel, i.e. 3 samples per\n target from detection matrix.\n\n Args:\n obj_raw (np.ndarray): (num_detected_objects, 3). detected objects from CFAR.\n det_matrix (np.ndarray): Range-doppler profile. 
shape is numRangeBins x num_doppler_bins.\n num_doppler_bins (int): number of doppler bins.\n max_range_idx (int): max range of detected objects.\n min_range_idx (int): min range of detected objects\n group_in_doppler_direction (int): flag to perform grouping along doppler direction.\n group_in_range_direction (int): flag to perform grouping along range direction.\n\n Returns:\n obj_out (np.ndarray): detected object after grouping.\n\n \"\"\"\n\n num_detected_objects = obj_raw.shape[0]\n\n num_obj_out = 0\n kernel = np.empty([9])\n\n if (group_in_doppler_direction == 1) and (group_in_range_direction == 1):\n # Grouping both in Range and Doppler direction\n start_ind = 0\n step_ind = 1\n end_ind = 8\n elif (group_in_doppler_direction == 0) and (group_in_range_direction == 1):\n # Grouping only in Range direction\n start_ind = 1\n step_ind = 3\n end_ind = 7\n elif (group_in_doppler_direction == 1) and (group_in_range_direction == 0):\n # Grouping only in Doppler direction */\n start_ind = 3\n step_ind = 1\n end_ind = 5\n else:\n # No grouping, copy all detected objects to the output matrix within specified min max range\n # num_detected_objects = min(num_detected_objects, MAX_OBJ_OUT)\n obj_out = obj_raw[obj_raw[:, RANGEIDX] <= max_range_idx and obj_raw[:, RANGEIDX] > min_range_idx]\n obj_out[:, DOPPLERIDX] = np.bitwise_and(obj_out[:, DOPPLERIDX], num_doppler_bins - 1)\n\n return obj_out\n\n # Start checking\n obj_out = np.zeros((num_obj_out, 3))\n for i in range(num_detected_objects):\n detected_obj_flag = 0\n range_idx = obj_raw[i, 0]\n doppler_idx = obj_raw[i, 1]\n peak_val = obj_raw[i, 2]\n\n if (range_idx <= max_range_idx) and (range_idx >= min_range_idx):\n detected_obj_flag = 1\n\n # Fill local 3x3 kernel from detection matrix in L3\n start_idx = (range_idx - 1) * num_doppler_bins\n temp_ptr = det_matrix[start_idx:]\n row_start = 0\n row_end = 2\n\n if range_idx == min_range_idx:\n start_idx = range_idx * num_doppler_bins\n temp_ptr = det_matrix[start_idx:]\n row_start = 1\n kernel[0] = 0\n kernel[1] = 0\n kernel[2] = 0\n elif range_idx == max_range_idx:\n row_end = 1\n kernel[6] = 0\n kernel[7] = 0\n kernel[8] = 0\n\n for j in range(row_start, row_end + 1):\n for k in range(3):\n\n temp_idx = doppler_idx + (k - 1)\n\n if temp_idx < 0:\n temp_idx += num_doppler_bins\n elif temp_idx >= num_doppler_bins:\n temp_idx -= num_doppler_bins\n\n kernel[j * 3 + k] = temp_ptr[temp_idx]\n\n temp_ptr = temp_ptr[num_doppler_bins:]\n\n # Compare the detected object to its neighbors\n # Detected object is at index 4\n for k in range(start_ind, end_ind + 1, step_ind):\n if kernel[k] > kernel[4]:\n detected_obj_flag = 0\n\n if detected_obj_flag == 1:\n obj_out[num_obj_out, 0] = range_idx\n obj_out[num_obj_out, 1] = DOPPLER_IDX_TO_SIGNED(doppler_idx, num_doppler_bins)\n obj_out[num_obj_out, 2] = peak_val\n num_obj_out += 1\n\n if num_obj_out >= MAX_OBJ_OUT:\n break\n\n return num_obj_out, obj_out\n\n\ndef peak_grouping_qualified(obj_raw,\n num_doppler_bins,\n max_range_idx,\n min_range_idx,\n group_in_doppler_direction,\n group_in_range_direction):\n \"\"\"Performs peak grouping on list of CFAR detected objects.\n\n The function groups neighboring peaks into one. The grouping is done according to two input flags:\n group_in_doppler_direction and group_in_doppler_direction. For each detected peak the function checks if the peak is\n greater than its neighbors. If this is true, the peak is copied to the output list of detected objects. 
The\n neighboring peaks that are used for checking are taken from the list of CFAR detected objects, (not from the\n detection matrix), and copied into 3x3 kernel that has been initialized to zero for each peak under test. If the\n neighboring cell has not been detected by CFAR, its peak value is not copied into the kernel. Note: Function always\n search for 8 peaks in the list, but it only needs to search according to input flags.\n\n Args:\n obj_raw (np.ndarray): (num_detected_objects, 3). detected objects from CFAR.\n num_doppler_bins (int): number of doppler bins.\n max_range_idx (int): max range of detected objects.\n min_range_idx (int): min range of detected objects\n group_in_doppler_direction (int): flag to perform grouping along doppler direction.\n group_in_range_direction (int): flag to perform grouping along range direction.\n\n Returns:\n obj_out (np.ndarray): detected object after grouping.\n\n \"\"\"\n\n num_detected_objects = obj_raw.shape[0]\n\n if (group_in_doppler_direction == 1) and (group_in_range_direction == 1):\n # Grouping both in Range and Doppler direction\n start_ind = 0\n step_ind = 1\n end_ind = 8\n elif (group_in_doppler_direction == 0) and (group_in_range_direction == 1):\n # Grouping only in Range direction\n start_ind = 1\n step_ind = 3\n end_ind = 7\n elif (group_in_doppler_direction == 1) and (group_in_range_direction == 0):\n # Grouping only in Doppler direction */\n start_ind = 3\n step_ind = 1\n end_ind = 5\n else:\n # No grouping, copy all detected objects to the output matrix within specified min max range\n num_detected_objects = min(num_detected_objects, MAX_OBJ_OUT)\n obj_out = obj_raw[(obj_raw['range_idx'][:num_detected_objects] <= max_range_idx) &\n (obj_raw['range_idx'][:num_detected_objects] > min_range_idx)]\n\n return obj_out\n\n # Start checking\n idx_obj_in_range = np.argwhere((obj_raw['range_idx'] <= max_range_idx) &\n (obj_raw['range_idx'] >= min_range_idx))[:, 0]\n\n obj_in_range = obj_raw[idx_obj_in_range]\n kernels = np.zeros((obj_in_range.shape[0], 9))\n detected_obj_flag = np.ones(obj_in_range.shape[0])\n\n # Populate the middle column.\n # Populate the 4th element.\n kernels[:, 4] = obj_in_range['peakVal']\n\n # Populate the 1st element.\n obj_in_range_previous = obj_raw[idx_obj_in_range - 1]\n assert obj_in_range_previous.shape == obj_in_range.shape, \"obj_in_range_previous indexing is wrong\"\n idx_temp = ((obj_in_range_previous['range_idx']) == (obj_in_range['range_idx'] - 1)) & \\\n ((obj_in_range_previous['doppler_idx']) == (obj_in_range['doppler_idx']))\n kernels[idx_temp, 1] = obj_in_range_previous['peakVal'][idx_temp]\n # 0th detected object has no left neighbor.\n kernels[idx_obj_in_range[idx_obj_in_range[:] == 0], 1] = 0\n\n # Populate the 7th element.\n obj_in_range_next = obj_raw[(idx_obj_in_range + 1) % num_detected_objects]\n assert obj_in_range_next.shape == obj_in_range.shape, \"obj_in_range_next indexing is wrong\"\n idx_temp = ((obj_in_range_next['range_idx']) == (obj_in_range['range_idx'] + 1)) & \\\n ((obj_in_range_next['doppler_idx']) == (obj_in_range['doppler_idx']))\n kernels[idx_temp, 7] = obj_in_range_next['peakVal'][idx_temp]\n # last detected object, i.e. 
num_detected_objects-th has no left neighbor.\n kernels[idx_obj_in_range[idx_obj_in_range[:] == num_detected_objects], 7] = 0\n\n for i, idxDeteced in enumerate(idx_obj_in_range):\n doppler_idx = obj_in_range['doppler_idx'][i]\n range_idx = obj_in_range['range_idx'][i]\n # Fill the left column\n k_left = WRAP_UP_LIST_IDX(idxDeteced - 1, num_detected_objects)\n k_right = WRAP_DN_LIST_IDX(idxDeteced + 1, num_detected_objects)\n for _ in range(num_detected_objects):\n k_left_doppler_idx = obj_raw['doppler_idx'][k_left]\n k_left_range_idx = obj_raw['range_idx'][k_left]\n k_left_peak_val = obj_raw['peakVal'][k_left]\n if k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 2, num_doppler_bins):\n break\n if k_left_range_idx == range_idx + 1 and k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 1,\n num_doppler_bins):\n kernels[i, 6] = k_left_peak_val\n elif k_left_range_idx == range_idx and k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 1,\n num_doppler_bins):\n kernels[i, 3] = k_left_peak_val\n elif k_left_range_idx == range_idx - 1 and k_left_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 1,\n num_doppler_bins):\n kernels[i, 0] = k_left_peak_val\n k_left = WRAP_UP_LIST_IDX(k_left - 1, num_detected_objects)\n\n k_right_doppler_idx = obj_raw['doppler_idx'][k_right]\n k_right_range_idx = obj_raw['range_idx'][k_right]\n k_right_peak_val = obj_raw['peakVal'][k_right]\n if k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx - 2, num_doppler_bins):\n break\n if k_right_range_idx == range_idx + 1 and k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx + 1,\n num_doppler_bins):\n kernels[i, 8] = k_right_peak_val\n elif k_right_range_idx == range_idx and k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx + 1,\n num_doppler_bins):\n kernels[i, 5] = k_right_peak_val\n elif k_right_range_idx == range_idx - 1 and k_right_doppler_idx == WRAP_DOPPLER_IDX(doppler_idx + 1,\n num_doppler_bins):\n kernels[i, 2] = k_right_peak_val\n k_right = WRAP_DN_LIST_IDX(k_right + 1, num_detected_objects)\n\n detected_obj_flag[np.argwhere(np.max(kernels[:, start_ind:end_ind:step_ind]) != kernels[:, 4])] = 0\n obj_out = obj_in_range[detected_obj_flag[:] == 1]\n\n if obj_out.shape[0] > MAX_OBJ_OUT:\n obj_out = obj_out[:MAX_OBJ_OUT, ...]\n\n return obj_out\n"
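A self-contained demo of the CA-CFAR thresholding implemented by ca_ above, with one injected target; the signal, the guard/noise lengths, and l_bound are illustrative values:

import numpy as np
from scipy.ndimage import convolve1d

rng = np.random.default_rng(0)
x = rng.integers(40, 60, size=64).astype(float)
x[20] = 200.0  # synthetic target well above the noise floor

guard_len, noise_len, l_bound = 2, 8, 30
kernel = np.ones(1 + 2 * guard_len + 2 * noise_len) / (2 * noise_len)
kernel[noise_len:noise_len + 2 * guard_len + 1] = 0  # zero out the guard cells and the CUT

noise_floor = convolve1d(x, kernel, mode='wrap')  # mean of the 2*noise_len training cells
detections = x > noise_floor + l_bound
print(np.flatnonzero(detections))  # expected to contain index 20

Note also that, up to the special range-edge handling, the 3x3 neighbour comparison inside peak_grouping amounts to a wrapped local-maximum test on the range-Doppler matrix, e.g. det_matrix == scipy.ndimage.maximum_filter(det_matrix, size=3, mode='wrap').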
] | [
[
"numpy.abs",
"numpy.einsum",
"numpy.fft.fft",
"numpy.concatenate",
"numpy.argmax",
"numpy.transpose",
"numpy.sum"
],
[
"numpy.minimum",
"numpy.maximum",
"numpy.arange",
"numpy.ones",
"numpy.concatenate",
"numpy.bitwise_and",
"numpy.argwhere",
"numpy.max",
"numpy.array",
"numpy.zeros",
"numpy.empty",
"scipy.ndimage.convolve1d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
amckenna41/CDBLSTM_PSP | [
"d4e5d874af65c1264c3a459ecad19e71610d1f82"
] | [
"psp/main_gcp.py"
] | [
"################################################################################\n##### Entry script for psp_gcp dir for training on Google Cloud Platform #####\n################################################################################\n\n#import required modules and dependancies\nimport tensorflow as tf\nimport argparse\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.layers import Bidirectional, LSTM, Input, Conv1D, \\\n Embedding, Dense, Dropout, Activation, Concatenate, Reshape,MaxPooling1D, Convolution1D,BatchNormalization\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import l2\nfrom tensorflow.keras.callbacks import EarlyStopping ,ModelCheckpoint, TensorBoard, \\\n ReduceLROnPlateau, LearningRateScheduler, CSVLogger\nfrom tensorflow.keras.metrics import AUC, MeanSquaredError, FalseNegatives, FalsePositives, \\\n MeanAbsoluteError, TruePositives, TrueNegatives, Precision, Recall\nfrom tensorflow.keras import activations\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.compat.v1.keras.backend import set_session\nimport os\nfrom os.path import isfile, join\nfrom os import listdir\nimport sys\nimport time\nimport importlib\nimport pkgutil\nimport json\nfrom google.cloud import storage\nfrom json.decoder import JSONDecodeError\nfrom psp.load_dataset import *\nfrom psp.plot_model import *\nfrom psp.gcp_utils import *\nfrom psp._globals import *\nfrom psp.evaluate import *\nfrom psp.models import *\nfrom psp.models.auxiliary_models import *\n\nimport warnings\nwarnings.filterwarnings(\"ignore\", message=r\"Passing\", category=FutureWarning)\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' #reduce TF log output to only include Errors\n\n### Tensorboard parameters and configuration ###\ntf.compat.v1.reset_default_graph()\ntf.keras.backend.clear_session() # For easy reset of notebook state.\n# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)\nconfig_proto = tf.compat.v1.ConfigProto()\ntf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)\nconfig_proto.allow_soft_placement = True\noff = rewriter_config_pb2.RewriterConfig.OFF\nconfig_proto.gpu_options.allow_growth = True\nconfig_proto.graph_options.rewrite_options.arithmetic_optimization = off\n#set tensorflow GPUOptions so TF doesn't overload GPU if present\n# config_proto.gpu_options(per_process_gpu_memory_fraction=0.333)\nsession = tf.compat.v1.Session(config=config_proto)\n\n# tf.Session(config=tf.compat.v1.ConfigProto(log_device_placement=True))\nset_session(session)\n\n#get model filenames from models and auxillary models directory\nall_models = [name for _, name, _ in pkgutil.iter_modules([os.path.join('psp','models')])]\nall_models = all_models + [name for _, name, _ in pkgutil.iter_modules([os.path.join('psp','models','auxiliary_models')])]\n\n#main function to train and evaluate CNN + RNN + DNN model\ndef main(args):\n \"\"\"\n Description:\n Main function for training, evaluating and plotting PSP models via GCP.\n Args:\n :args (dict): parsed input arguments.\n Returns:\n None\n \"\"\"\n #load json from config input parameters\n params = json.loads(args.params)\n gcp_params = json.loads(args.gcp_params)\n model_params = json.loads(args.model_params)\n\n #get input arguments\n config = args.config\n local = args.local\n job_dir = args.job_dir\n package_path = gcp_params[\"package_path\"]\n bucket = gcp_params[\"bucket\"]\n training_data = params[\"training_data\"]\n filtered = params[\"filtered\"]\n batch_size = 
int(params[\"batch_size\"])\n epochs = int(params[\"epochs\"])\n logs_path = str(params[\"logs_path\"])\n cuda = params[\"cuda\"]\n tpu = gcp_params[\"tpu\"]\n test_dataset = str(params[\"test_dataset\"])\n model_ = str(params[\"model\"])\n tf_version = tf.__version__\n lr_scheduler = str(model_params[\"lr_scheduler\"])\n callbacks = (model_params[\"callbacks\"])\n\n #if using TPU, initalise TensorFlow TPU Strategy\n if (tpu):\n tpu_strategy = setup_tpu()\n\n #initialise global GCP bucket variable\n initialise_bucket(bucket)\n\n #create data dir to store all training and test datasets\n if not os.path.exists(DATA_DIR):\n os.makedirs(DATA_DIR)\n\n #create output dir to store model training output\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n #create folder where all model assets and artifacts will be stored after training\n model_output_folder = os.path.join(os.path.join(OUTPUT_DIR, model_ + '_'+ current_datetime))\n os.makedirs(model_output_folder)\n\n #create logs path directory where TensorBoard logs will be stored\n if not os.path.exists(os.path.join(model_output_folder, logs_path)):\n os.makedirs(os.path.join(model_output_folder, logs_path))\n\n #create checkpoints dir where model checkpoints will be saved\n if not os.path.exists(os.path.join(model_output_folder, 'checkpoints')):\n os.makedirs(os.path.join(model_output_folder, 'checkpoints'))\n\n #append parameters to model output results file\n model_output[\"Output Folder\"] = model_output_folder\n model_output[\"Config\"] = os.path.basename(config)\n model_output[\"Model\"] = model_\n model_output[\"Bucket\"] = bucket\n model_output[\"Training Dataset Type\"] = training_data\n model_output[\"Filtered?\"] = filtered\n model_output[\"Test Dataset\"] = test_dataset\n model_output[\"Number of epochs\"] = epochs\n model_output[\"Batch size\"] = batch_size\n model_output[\"Tensorflow Version\"] = tf_version\n model_output[\"TensorBoard logs dir\"] = os.path.join(model_output_folder, logs_path)\n model_output[\"Cuda\"] = cuda\n model_output[\"TPU\"] = tpu\n model_output[\"LR Scheduler\"] = lr_scheduler\n\n #load training dataset\n cullpdb = CullPDB(type=training_data, filtered=filtered)\n\n all_models.append(model_)\n\n #verify model specified in config parameter is an available trainable model\n if model_ not in all_models:\n raise ValueError('Model must be in available models.')\n\n #import model module from models or auxillary models folder\n if (model_!=\"psp_dcblstm_model\" and model_!=\"psp_dculstm_model\" and model_!=\"dummy_model\"):\n mod = importlib.import_module(package_path + \".models.auxiliary_models.\"+model_)\n else:\n mod = importlib.import_module(package_path + \".models.\"+model_)\n\n #build imported model with parameters from config\n model = mod.build_model(model_params)\n\n all_callbacks = []\n\n #initialise Tensorflow callbacks, append each callback if used\n if (callbacks[\"tensorboard\"]):\n tensorboard = tf.keras.callbacks.TensorBoard(log_dir=(os.path.join(model_output_folder,\n logs_path)), histogram_freq=0, write_graph=True, write_images=True)\n all_callbacks.append(tensorboard)\n if (callbacks[\"earlyStopping\"]):\n earlyStopping = EarlyStopping(monitor='loss', patience=5, verbose=1, mode='min')\n all_callbacks.append(earlyStopping)\n if (callbacks[\"modelCheckpoint\"]):\n checkpoint = ModelCheckpoint(filepath=os.path.join(model_output_folder, 'checkpoints','model_' + current_datetime + '.h5'), \\\n verbose=1,save_best_only=True, monitor='loss', mode='min')\n 
all_callbacks.append(checkpoint)\n if (callbacks[\"csv_logger\"]):\n csv_logger = CSVLogger(os.path.join(model_output_folder, 'training.log'))\n all_callbacks.append(csv_logger)\n if (callbacks[\"reduceLROnPlateau\"]):\n reduceLROnPlateau = ReduceLROnPlateau(monitor=\"loss\", factor=0.1, patience=10, verbose=1, mode=\"min\")\n all_callbacks.append(reduceLROnPlateau)\n\n #get LR Scheduler callback to use from parameter in config file\n #remove any whitespace or '-' from lr_schedule name\n lr_scheduler = lr_scheduler.lower().strip().replace(\" \", \"\").replace(\"-\",\"\")\n if (lr_scheduler == \"exceptionaldecay\" or lr_scheduler == \"exponential\"):\n exponentialDecay = ExponentialDecay()\n lr_schedule = LearningRateScheduler(exponentialDecay)\n all_callbacks.append(lr_schedule)\n elif (lr_scheduler == \"timebaseddecay\" or lr_scheduler == \"timebased\"):\n timeBasedDecay = TimedBased()\n lr_schedule = LearningRateScheduler(timeBasedDecay)\n all_callbacks.append(lr_schedule)\n elif (lr_scheduler == \"stepdecay\" or lr_scheduler == \"exponential\"):\n stepDecay = StepDecay()\n lr_schedule = LearningRateScheduler(stepDecay)\n all_callbacks.append(lr_schedule)\n\n #start counter\n start = time.time()\n\n #fit model\n if cuda:\n with tf.device('/gpu:0'): #if training on GPU\n print('Fitting model...')\n history = model.fit({'main_input': cullpdb.train_hot, 'aux_input': cullpdb.trainpssm},\n {'main_output': cullpdb.trainlabel},validation_data=({'main_input': cullpdb.val_hot, 'aux_input': cullpdb.valpssm},\n {'main_output': cullpdb.vallabel}), epochs=epochs, batch_size=batch_size, verbose=2,\n callbacks=all_callbacks,shuffle=True)\n else: #training on CPU (default) or TPU\n print('Fitting model...')\n history = model.fit({'main_input': cullpdb.train_hot, 'aux_input': cullpdb.trainpssm},\n {'main_output': cullpdb.trainlabel},validation_data=({'main_input': cullpdb.val_hot, 'aux_input': cullpdb.valpssm},\n {'main_output': cullpdb.vallabel}), epochs=epochs, batch_size=batch_size, verbose=2,\n callbacks=all_callbacks,shuffle=True)\n\n #stop counter, calculate elapsed time\n elapsed = (time.time() - start)\n print('Elapsed Training Time: {}'.format(elapsed))\n model_output[\"Training Time\"] = elapsed\n\n #save model locally in saved models dir - create dir in this dir to store all model related objects\n print('Model saved in {} folder as {} '.format(\n os.path.dirname(model_output_folder), os.path.basename(os.path.join(model_output_folder, 'model.h5'))))\n model.save(os.path.join(model_output_folder, 'model.h5'))\n\n #save model history pickle\n history_filepath = os.path.join(model_output_folder, 'history.pckl')\n save_history(history, history_filepath)\n\n #plot model history and all metric plots\n plot_history(history.history, model_output_folder, show_histograms = False,\n show_boxplots = True, show_kde = True, filter_outliers = True)\n\n #evaluating model on test datasets\n evaluate_cullpdb(model,cullpdb)\n evaluate_model(model, test_dataset=test_dataset)\n\n #visualise Keras model and all its layers, store in png\n #Need to manually install graphviz (https://graphviz.gitlab.io/download/) etc...\n if (local==\"1\"):\n visualise_model(model, model_output_folder)\n\n #save model architecture\n with open(os.path.join(model_output_folder, \"model_architecture.json\"), \"w\") as model_arch:\n model_arch.write(model.to_json(indent=3))\n\n #getting output results from model into csv\n model_output_df = get_model_output(model_output_folder)\n\n #upload configuration json to storage bucket\n #local 
flag used as config file upload doesn't seem to work when training on GCP, only locally\n if (local==\"1\"):\n upload_file(os.path.join(model_output_folder,os.path.basename(config)),config)\n\n # upload model output folder and all training results and assets\n upload_directory(model_output_folder, model_output_folder)\n\n print('Model training files exported to bucket path: {}/{} '.format(bucket, model_output_folder))\n\n #append training results of current job to all results file\n append_all_output(model_output_df)\n\n #close tensorflow session\n session.close()\n\nif __name__ == \"__main__\":\n\n #############################################################\n ### PSP Input Arguments ###\n #############################################################\n\n parser = argparse.ArgumentParser(description='Protein Secondary Structure Prediction')\n\n parser.add_argument('-local', '--local', required=True,\n help='Flag to determine if job being run locally or on GCP.')\n parser.add_argument('-job-dir', '--job-dir', type=str, required=True,\n help='Directory where logs from training job are stored.')\n parser.add_argument('-config', '--config', type=str, required=True,\n help='File path to config json file.')\n parser.add_argument('-params', '--params', type=str, required=True,\n help='General training parameters')\n parser.add_argument('-gcp_params', '--gcp_params', type=str, required=True,\n help='GCP job parameters')\n parser.add_argument('-model_params', '--model_params', type=str, required=True,\n help='ML model parameters')\n\n #parse input arguments\n args = parser.parse_args()\n\n main(args)\n"
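A minimal sketch of one of the learning-rate schedules selected above, wired into Keras' LearningRateScheduler; the initial rate and decay constant are illustrative, not the repo's configured hyperparameters:

import math
import tensorflow as tf

initial_lr, decay = 1e-3, 0.05

def exponential_decay(epoch, lr=None):
    # lr(t) = lr0 * exp(-k * t), evaluated once per epoch.
    return initial_lr * math.exp(-decay * epoch)

lr_schedule = tf.keras.callbacks.LearningRateScheduler(exponential_decay, verbose=1)
# model.fit(..., callbacks=[lr_schedule, ...])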
] | [
[
"tensorflow.device",
"tensorflow.compat.v1.ConfigProto",
"tensorflow.compat.v1.GPUOptions",
"tensorflow.compat.v1.keras.backend.set_session",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.compat.v1.Session",
"tensorflow.keras.backend.clear_session",
"tensorflow.compat.v1.reset_default_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
htcr/deeplab-pytorch | [
"8cea35415112fefb6a886d0d98ab64350ed09601"
] | [
"api.py"
] | [
"import numpy as np\nimport cv2\n\nimport os\nimport os.path as osp\n\nimport torch\nimport yaml\nfrom addict import Dict\nimport matplotlib.pyplot as plt\n\nfrom .libs.models import *\nfrom .libs.utils import DenseCRF\n\nfrom demo import preprocessing, inference\n\nclass DeepLabV2Masker(object):\n def __init__(self, crf=True):\n cur_dir = osp.dirname(osp.realpath(__file__))\n \n config_path = osp.join(\n cur_dir,\n 'configs/human.yaml'\n )\n model_path = osp.join(\n cur_dir,\n 'data/models/human/deeplabv2_resnet101_msc/all_human/checkpoint_final.pth'\n )\n \n device = torch.device('cuda')\n CONFIG = Dict(yaml.load(open(config_path, 'r')))\n\n torch.set_grad_enabled(False)\n # CRF post-processor\n self.crf = crf\n if crf:\n self.postprocessor = DenseCRF(\n iter_max=CONFIG.CRF.ITER_MAX,\n pos_xy_std=CONFIG.CRF.POS_XY_STD,\n pos_w=CONFIG.CRF.POS_W,\n bi_xy_std=CONFIG.CRF.BI_XY_STD,\n bi_rgb_std=CONFIG.CRF.BI_RGB_STD,\n bi_w=CONFIG.CRF.BI_W,\n )\n else:\n self.postprocessor = None\n \n self.model = eval(CONFIG.MODEL.NAME)(n_classes=CONFIG.DATASET.N_CLASSES)\n state_dict = torch.load(model_path, map_location=lambda storage, loc: storage)\n self.model.load_state_dict(state_dict)\n self.model.eval()\n self.model.to(device)\n print(\"Model:\", CONFIG.MODEL.NAME)\n\n self.CONFIG = CONFIG\n self.device = device\n \n\n def get_mask(self, image, bk):\n ori_h, ori_w = image.shape[:2]\n image, raw_image = preprocessing(image, self.device, self.CONFIG)\n \n bk = cv2.resize(bk, raw_image.shape[:2][::-1])\n \n diff = np.maximum(raw_image, bk).astype(np.float32) / (np.minimum(raw_image, bk).astype(np.float32) + 0.1)\n \n diff = (diff - np.min(diff)) / (np.max(diff) - np.min(diff)) * 255\n\n diff = diff.astype(np.uint8)\n\n raw_image = diff\n\n #plt.imshow(raw_image)\n #plt.show() \n\n labelmap = inference(self.model, image, raw_image, self.postprocessor)\n mask = labelmap == 1\n mask = mask.astype(np.uint8) * 255\n mask = cv2.resize(mask, (ori_w, ori_h))\n mask = np.where(mask > 128, 255, 0).astype(np.uint8)\n return mask"
] | [
[
"numpy.maximum",
"numpy.minimum",
"torch.load",
"numpy.min",
"numpy.max",
"torch.set_grad_enabled",
"torch.device",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
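The ratio-based background subtraction inside get_mask in the api.py record above can be read as a standalone preprocessing step. A hypothetical helper sketching it; the 0.1 stabiliser and the 0-255 rescaling mirror the method above, while the 1e-8 guard is an addition for the degenerate all-equal case:

import numpy as np

def background_diff(image, background):
    # Ratio of the brighter to the darker pixel, per channel: close to 1
    # where the frame matches the background, large where it differs.
    hi = np.maximum(image, background).astype(np.float32)
    lo = np.minimum(image, background).astype(np.float32)
    diff = hi / (lo + 0.1)
    diff = (diff - diff.min()) / (diff.max() - diff.min() + 1e-8) * 255
    return diff.astype(np.uint8)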
alicechi2/LargeScaleCoverSongId | [
"d33a8425ce8761f09537d657d29c0e4b87e05249"
] | [
"binary_task.py"
] | [
"#!/usr/bin/env python\n\"\"\"\nBinary task of cover song identification using the Millions Song Dataset \nand the Second Hand Song dataset.\n\nIt takes the Million Song Dataset path as an argument. \n\nThe list of queries to test must be located in:\n./SHS/list_500queries.txt\n\nThe training set of the Second Hand Song dataset must be located in:\n./SHS/shs_dataset_train.txt\n\nPlease, read the README.md file for more info on how to run this code.\n\nReferences:\nBertin-Mahieux, T., & Ellis, D. P. W. (2012). Large-Scale Cover Song \nRecognition Using The 2D Fourier Transform Magnitude. In Proc. of the 13th \nInternational Society for Music Information Retrieval Conference (pp. 241-246).\nPorto, Portugal.\n\nHumphrey, E. J., Nieto, O., & Bello, J. P. (2013). Data Driven and \nDiscriminative Projections for Large-Scale Cover Song Identification. \nIn Proc. of the 14th International Society for Music Information Retrieval \nConference. Curitiba, Brazil.\n\nCreated by Thierry Bertin-Mahieux ([email protected])\nModified by Uri Nieto ([email protected])\n\n----\nThis code is distributed under the GNU LESSER PUBLIC LICENSE \n(LGPL, see www.gnu.org).\n\nCopyright (c) 2012-2013 MARL@NYU.\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n a. Redistributions of source code must retain the above copyright notice,\n this list of conditions and the following disclaimer.\n b. Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n c. Neither the name of MARL, NYU nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR\nANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\nDAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\nSERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\nCAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT\nLIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY\nOUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH\nDAMAGE.\n\"\"\"\n\nimport argparse\nimport cPickle\nimport numpy as np\nimport os\nimport sys\nimport time\n\n# local stuff\nimport pca\nimport hdf5_getters as GETTERS\nimport dan_tools\nimport utils\nfrom transforms import load_transform\n\n# Thierry's original parameters for ISMIR paper\nWIN = 75\nPWR = 1.96\nPATCH_LEN = WIN*12\n\n# Set up logger\nlogger = utils.configure_logger()\n\ndef extract_feats(filename, td=None, lda_file=None, lda_n=0, ver=True):\n \"\"\"Computes the features using the dictionary transformation td. 
\n If it doesn't exist, computes them using Thierry's method.\n\n The improved pipeline is composed of 11 steps:\n\n 1.- Beat Synchronous Chroma\n 2.- L2-Norm\n 3.- Shingle (PATCH_LEN: 75 x 12)\n 4.- 2D-FFT\n 5.- L2-Norm\n 6.- Log-Scale\n 7.- Sparse Coding\n 8.- Shrinkage\n 9.- Median Aggregation\n 10.- Dimensionality Reduction\n 11.- L2-Norm\n\n Original method by Thierry doesn't include steps 5,6,7,8,11.\n \"\"\"\n # 1.- Beat Synchronous Chroma\n # 2.- L2-Norm\n # 3.- Shingle (PATCH_LEN: 75 x 12)\n # 4.- 2D-FFT\n feats = utils.extract_feats(filename)\n if feats is None:\n return None\n\n if td is not None:\n # 5.- L2-Norm\n # 6.- Log-Scale\n # 7.- Sparse Coding\n # 8.- Shrinkage\n H = td(feats)\n else:\n H = feats\n\n #. 9.- Median Aggregation\n H = np.median(H, axis=0)\n\n # Apply LDA if needed\n if lda_file is not None:\n # 10.- Dimensionality Reduction\n H = lda_file[lda_n].transform(H)\n\n # 11.- L2-Norm\n feats = dan_tools.chromnorm(H.reshape(H.shape[0], 1)).squeeze()\n\n return feats\n\n\ndef read_query_file(queriesf):\n \"\"\"Read queries, return triplets (query/good/bad).\"\"\"\n queries = []\n triplet = []\n f = open(queriesf, 'r')\n for line in f.xreadlines():\n if line == '' or line.strip() == '':\n continue\n if line[0] == '#':\n continue\n if line[0] == '%':\n assert len(triplet) == 0 or len(triplet) == 3\n if len(triplet) > 0:\n queries.append(triplet)\n triplet = []\n continue\n tid = line.strip()\n assert len(tid) == 18 and tid[:2] == 'TR'\n triplet.append(tid)\n assert len(triplet) == 3\n queries.append(triplet)\n f.close()\n logger.info('Found %d queries from file %s' % (len(queries), queriesf))\n return queries\n\n\ndef main():\n # Args parser\n parser = argparse.ArgumentParser(description=\n \"Evaluates the 500 binary queries from the SHS data set\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"msd_dir\", action=\"store\",\n help=\"Million Song Dataset main directory\")\n parser.add_argument(\"-dictfile\", action=\"store\", default=\"\",\n help=\"Pickle to the learned dictionary\")\n parser.add_argument(\"-lda\", action=\"store\", nargs=2, default=[None,0], \n help=\"LDA file and version\", metavar=('lda.pkl', 'n'))\n parser.add_argument(\"-pca\", nargs=2, metavar=('f.pkl', 'n'), \n default=(\"\", 0),\n help=\"pca model saved in a pickle file, \" \\\n \"use n dimensions\")\n # Parse\n args = parser.parse_args()\n\n # Track time\n start_time = time.time()\n\n maindir = args.msd_dir\n queriesf = \"SHS/list_500queries.txt\"\n shsf = \"SHS/shs_dataset_train.txt\"\n lda = args.lda[0]\n lda_n = int(args.lda[1])\n pcafile = args.pca[0]\n pcadim = int(args.pca[1])\n\n # sanity cheks\n utils.assert_file(maindir)\n utils.assert_file(queriesf)\n utils.assert_file(shsf)\n utils.assert_file(pcafile)\n\n # read queries\n queries = read_query_file(queriesf)\n\n # load pca\n trainedpca = None\n if pcafile != \"\":\n f = open(pcafile, 'r')\n trainedpca = cPickle.load(f)\n f.close()\n assert pcadim > 0\n logger.info('trained pca loaded')\n\n # load lda\n if lda != None:\n lda = utils.load_pickle(lda)\n\n # to keep stats\n results = []\n\n # iterate over queries\n logger.info(\"Starting the binary task...\")\n\n # Get the dictionary transform\n td = load_transform(args.dictfile)\n\n for triplet in queries:\n # get features\n filenames = map(lambda tid: utils.path_from_tid(maindir, tid), triplet)\n triplet_feats = map(lambda f: extract_feats(f, td=td, \n lda_file=lda, lda_n=lda_n), filenames)\n if None in triplet_feats:\n continue\n\n # Apply pca if 
needed\n if trainedpca:\n triplet_feats = map(lambda feat: \\\n trainedpca.apply_newdata(feat, ndims=pcadim),\n triplet_feats)\n assert triplet_feats[np.random.randint(3)].shape[0] == pcadim\n \n # Compute result\n res1 = triplet_feats[0] - triplet_feats[1]\n res1 = np.sum(res1 * res1)\n res2 = triplet_feats[0] - triplet_feats[2]\n res2 = np.sum(res2 * res2)\n if res1 < res2:\n results.append(1)\n else:\n results.append(0)\n\n # verbose\n if len(results) % 5 == 0:\n logger.info(' --- after %d queries, accuracy: %.1f %%' % \\\n (len(results), 100. * np.mean(results)))\n # done\n logger.info('After %d queries, accuracy: %.1f %%' % (len(results),\n 100. * np.mean(results)))\n logger.info('Done! Took %.2f seconds' % (time.time() - start_time))\n\nif __name__ == '__main__':\n main()\n"
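The binary task above ultimately reduces to a nearest-neighbour test on the extracted features; a minimal sketch (the function name is illustrative):

import numpy as np

def triplet_correct(query, good, bad):
    # A query is counted as correct iff the true cover ("good") is closer
    # in squared Euclidean distance than the non-cover ("bad").
    d_good = np.sum((query - good) ** 2)
    d_bad = np.sum((query - bad) ** 2)
    return d_good < d_bad

# accuracy = np.mean([triplet_correct(*feats) for feats in all_triplets])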
] | [
[
"numpy.median",
"numpy.mean",
"numpy.sum",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bmorris3/mosfire_wasp6 | [
"df802640eeb717a649c18caa1e940b684eeb99dc"
] | [
"analysis/moar/samples.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 09:33:04 2015\n\n@author: bmmorris\n\"\"\"\n\nimport numpy as np\nimport triangle\nfrom matplotlib import pyplot as plt\n\ndef splitchain(infile, outfile, tossfraction=0.9):\n '''\n Take the last `savefraction` of file `infile`, save it as the\n smaller file `outfile`.\n '''\n with open(outfile, 'w') as out:\n with open(infile, 'r') as f:\n alllines = f.readlines()\n lastXpercent = int(tossfraction*len(alllines))\n shortlines = alllines[lastXpercent:]\n out.write(''.join(shortlines))\n\ndef loadchains(directory, file='chains.dat', burnin=0.0):\n '''\n Load chains in `directory` saved to text file `file`, eliminate\n burn in fraction `burnin`\n '''\n\n chains = np.loadtxt(directory+file)\n burnin = int(burnin*chains.shape[0])\n lnp = chains[burnin:, 1]\n samples = chains[burnin:, 2:]\n return lnp, samples\n \nclass emceesamples(object):\n def __init__(self, samples, labels, dtypes, Nbins, Nlightcurves):\n '''\n Input the samples, output from loadchains(), labels for each parameter\n and data types for each parameter according to the following format: \n \n 'o' = orbital parameter\n 'l' = (L) limb darkening\n 't' = transit parameters particular to each spectral bin\n 'w' = white noise hyperparameters\n 'r' = red noise hyperparameters\n 'a' = airmass\n 'R' = radius\n 'F' = out of transit flux\n \n '''\n \n self.samples = samples\n self.labels = labels\n self.dtypes = dtypes\n self.Nbins = Nbins\n self.Nlightcurves = Nlightcurves\n self.white = None\n self.red = None\n \n self.getld()\n self.getRpRs()\n self.getF0()\n self.getorb()\n self.getam()\n if 'w' in self.dtypes:\n self.getwhite()\n if 'r' in self.dtypes:\n self.getred()\n \n def getwhite(self):\n whiteinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'w']\n self.white = self.samples[:,whiteinds]\n self.whitelabels = len(whiteinds)*['w']\n\n def getred(self):\n redinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'r']\n self.red = self.samples[:,redinds]\n\n def getld(self):\n ldinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'l']\n self.ld = self.samples[:,ldinds]\n self.ldlabels = [label for i, label in enumerate(self.labels) \n if i in ldinds] \n\n def getorb(self):\n orbinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'o']\n self.orb = self.samples[:,orbinds]\n self.orblabels = [label for i, label in enumerate(self.labels) \n if i in orbinds] \n\n def getRpRs(self):\n RpRsinds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'R']\n self.RpRs = self.samples[:,RpRsinds]\n self.RpRslabels = [label for i, label in enumerate(self.labels) \n if i in RpRsinds] \n\n def getF0(self):\n F0inds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'F']\n self.F0 = self.samples[:,F0inds]\n self.F0labels = [label for i, label in enumerate(self.labels) \n if i in F0inds] \n\n def getam(self):\n aminds = [i for i in range(len(self.dtypes)) \n if self.dtypes[i] == 'a']\n self.am = self.samples[:,aminds]\n self.amlabels = [label for i, label in enumerate(self.labels) \n if i in aminds] \n\n def triangles(self, directory=None, wavelengths=None, show=False):\n '''\n Create triangle plots. 
If directory is not None, save plots in that \n directory.\n '''\n \n if wavelengths is None:\n wavelengths = np.arange(self.Nlightcurves)\n \n # Orbital parameters \n Norbparams = len(self.orblabels)\n trifig1, ax = plt.subplots(Norbparams, Norbparams, figsize=(10, 10))\n kwargs = dict(fig=trifig1, plot_datapoints=False, \n labels=self.orblabels)\n fig1 = triangle.corner(self.orb, **kwargs) \n trifig1.suptitle('Orbital Parameters', size=20)\n if directory is not None:\n trifig1.savefig(directory+'triangle_orbit.png',bbox_inches='tight')\n if not show:\n plt.clf()\n \n # Plot Limb darkening parameters\n for i in range(0, len(self.ldlabels), 2):\n trifigLD, ax = plt.subplots(2, 2, figsize=(6, 6))\n kwargs = dict(fig=trifigLD, plot_datapoints=False, \n labels=self.ldlabels[i:i+2])\n fig2 = triangle.corner(self.ld[:,i:i+2], \n labelspace=False, **kwargs) \n trifigLD.suptitle('LD Parameters', size=20)\n if directory is not None:\n trifigLD.savefig(directory+'triangle_ld{0}.png'.format(i/2),\n bbox_inches='tight')\n if not show:\n plt.clf()\n \n # Plot Limb darkening parameters \n for i in range(0, len(self.ldlabels), 2):\n trifigLD, ax = plt.subplots(2, 2, figsize=(6, 6))\n kwargs = dict(fig=trifigLD, plot_datapoints=False, \n labels=self.ldlabels[i:i+2])\n fig2 = triangle.corner(self.ld[:,i:i+2], \n labelspace=False, **kwargs) \n trifigLD.suptitle('LD Parameters', size=20)\n if directory is not None:\n trifigLD.savefig(directory+'triangle_ld{0}.png'.format(i/2),\n bbox_inches='tight')\n if not show:\n plt.clf() \n \n \n # Plot RpRs, F0, white noise\n for i in range(len(self.RpRslabels)):\n if i < self.Nbins:\n trifig, ax = plt.subplots(4, 4, figsize=(6, 6))\n kwargs = dict(fig=trifig, plot_datapoints=False, \n labels=[self.RpRslabels[i], self.F0labels[i],\n self.whitelabels[i], self.amlabels[i]])\n testsamples = np.vstack([self.RpRs[:,i],\n self.F0[:,i],\n self.white[:,i],\n self.am[:,i]]).T\n else:\n trifig, ax = plt.subplots(3, 3, figsize=(6, 6))\n kwargs = dict(fig=trifig, plot_datapoints=False, \n labels=[self.RpRslabels[i], self.F0labels[i],\n self.whitelabels[i]])\n testsamples = np.vstack([self.RpRs[:,i],\n self.F0[:,i],\n self.white[:,i]]).T\n\n fig2 = triangle.corner(testsamples, labelspace=True, **kwargs) \n trifig.suptitle('{0:.3f}$\\mu m$'.format(wavelengths[i]), size=20)\n if directory is not None:\n trifig.savefig(directory+'triangle_RpRs{0}.png'.format(i/2),\n bbox_inches='tight')\n if not show:\n plt.clf() \n \n if show:\n plt.show()\n else:\n plt.clf()\n \n \n \n \n \n "
] | [
[
"numpy.arange",
"numpy.vstack",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.clf",
"matplotlib.pyplot.show",
"numpy.loadtxt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JasonQSY/Associative3D | [
"c50818b593ec48c38ed7ee3e109c23531089da32",
"c50818b593ec48c38ed7ee3e109c23531089da32",
"c50818b593ec48c38ed7ee3e109c23531089da32",
"c50818b593ec48c38ed7ee3e109c23531089da32"
] | [
"object_branch/benchmark/suncg/evaluate_detection.py",
"object_branch/benchmark/nyu/box3d.py",
"object_branch/utils/nyu_parse.py",
"blender/render_utils.py"
] | [
"# ---------------------------------------------------------\n# Copyright (c) 2015, Saurabh Gupta\n#\n# Licensed under The MIT License [see LICENSE for details]\n# ---------------------------------------------------------\nfrom ...utils import bbox_utils\nimport numpy as np\n\ndef inst_bench_image(dt, gt, bOpts, overlap = None):\n\n nDt = len(dt['sc'])\n nGt = len(gt['diff'])\n numInst = np.sum(gt['diff'] == False)\n\n if overlap is None:\n overlap = bbox_utils.bbox_overlaps(dt['boxInfo'].astype(np.float), gt['boxInfo'].astype(np.float))\n # assert(issorted(-dt.sc), 'Scores are not sorted.\\n');\n sc = dt['sc'];\n\n det = np.zeros((nGt,1)).astype(np.bool)\n tp = np.zeros((nDt,1)).astype(np.bool)\n fp = np.zeros((nDt,1)).astype(np.bool)\n dupDet = np.zeros((nDt,1)).astype(np.bool)\n instId = np.zeros((nDt,1)).astype(np.int32)\n ov = np.zeros((nDt,1)).astype(np.float32)\n\n # Walk through the detections in decreasing score\n # and assign tp, fp, fn, tn labels\n for i in range(nDt):\n # assign detection to ground truth object if any\n if nGt > 0:\n maxOverlap = overlap[i,:].max(); maxInd = overlap[i,:].argmax();\n instId[i] = maxInd; ov[i] = maxOverlap;\n else:\n maxOverlap = 0; instId[i] = -1; maxInd = -1;\n # assign detection as true positive/don't care/false positive\n if maxOverlap >= bOpts['minoverlap']:\n if gt['diff'][maxInd] == False:\n if det[maxInd] == False:\n # true positive\n tp[i] = True;\n det[maxInd] = True;\n else:\n # false positive (multiple detection)\n fp[i] = True;\n dupDet[i] = True;\n else:\n # false positive\n fp[i] = True;\n return tp, fp, sc, numInst, dupDet, instId, ov\n\n\ndef inst_bench(dt, gt, bOpts, tp=None, fp=None, score=None, numInst=None):\n \"\"\"\n ap, rec, prec, npos, details = inst_bench(dt, gt, bOpts, tp = None, fp = None, sc = None, numInst = None)\n dt - a list with a dict for each image and with following fields\n .boxInfo - info that will be used to cpmpute the overlap with ground truths, a list\n .sc - score\n gt\n .boxInfo - info used to compute the overlap, a list\n .diff - a logical array of size nGtx1, saying if the instance is hard or not\n bOpt\n .minoverlap - the minimum overlap to call it a true positive\n [tp], [fp], [sc], [numInst]\n Optional arguments, in case the inst_bench_image is being called outside of this function\n \"\"\"\n details = None\n if tp is None:\n # We do not have the tp, fp, sc, and numInst, so compute them from the structures gt, and out\n tp = []; fp = []; numInst = []; score = []; dupDet = []; instId = []; ov = [];\n for i in range(len(gt)):\n # Sort dt by the score\n sc = dt[i]['sc']\n bb = dt[i]['boxInfo']\n ind = np.argsort(sc, axis = 0);\n ind = ind[::-1]\n if len(ind) > 0:\n sc = np.vstack((sc[i,:] for i in ind))\n bb = np.vstack((bb[i,:] for i in ind))\n else:\n sc = np.zeros((0,1)).astype(np.float)\n bb = np.zeros((0,4)).astype(np.float)\n\n dtI = dict({'boxInfo': bb, 'sc': sc})\n tp_i, fp_i, sc_i, numInst_i, dupDet_i, instId_i, ov_i = inst_bench_image(dtI, gt[i], bOpts)\n tp.append(tp_i); fp.append(fp_i); score.append(sc_i); numInst.append(numInst_i);\n dupDet.append(dupDet_i); instId.append(instId_i); ov.append(ov_i);\n details = {'tp': list(tp), 'fp': list(fp), 'score': list(score), 'dupDet': list(dupDet),\n 'numInst': list(numInst), 'instId': list(instId), 'ov': list(ov)}\n\n tp = np.vstack(tp[:])\n fp = np.vstack(fp[:])\n sc = np.vstack(score[:])\n\n cat_all = np.hstack((tp,fp,sc))\n ind = np.argsort(cat_all[:,2])\n cat_all = cat_all[ind[::-1],:]\n tp = np.cumsum(cat_all[:,0], axis = 0);\n fp = 
np.cumsum(cat_all[:,1], axis = 0);\n thresh = cat_all[:,2];\n npos = np.sum(numInst, axis = 0);\n\n # Compute precision/recall\n rec = tp / npos;\n prec = np.divide(tp, (fp+tp));\n ap = VOCap(rec, prec);\n return ap, rec, prec, npos, details\n\ndef VOCap(rec, prec):\n rec = rec.reshape(rec.size,1); prec = prec.reshape(prec.size,1)\n z = np.zeros((1,1)); o = np.ones((1,1));\n mrec = np.vstack((z, rec, o))\n mpre = np.vstack((z, prec, z))\n for i in range(len(mpre)-2, -1, -1):\n mpre[i] = max(mpre[i], mpre[i+1])\n\n I = np.where(mrec[1:] != mrec[0:-1])[0]+1;\n ap = 0;\n for i in I:\n ap = ap + (mrec[i] - mrec[i-1])*mpre[i];\n return ap\n",
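A self-contained restatement of the interpolated average precision computed by VOCap above, checked on a toy ranking [TP, FP, TP] over two positives:

import numpy as np

def voc_ap(rec, prec):
    mrec = np.concatenate(([0.0], rec, [1.0]))
    mpre = np.concatenate(([0.0], prec, [0.0]))
    for i in range(len(mpre) - 2, -1, -1):        # monotone precision envelope
        mpre[i] = max(mpre[i], mpre[i + 1])
    idx = np.where(mrec[1:] != mrec[:-1])[0] + 1  # recall change points
    return float(np.sum((mrec[idx] - mrec[idx - 1]) * mpre[idx]))

rec = np.array([0.5, 0.5, 1.0])      # cumulative recall after each detection
prec = np.array([1.0, 0.5, 2 / 3])   # cumulative precision
print(voc_ap(rec, prec))             # 0.5*1.0 + 0.5*(2/3) ≈ 0.8333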
"\n\"\"\"Script for dwr prediction benchmarking.\n\"\"\"\n# Sample usage:\n# (shape_ft) : python -m factored3d.benchmark.suncg.dwr --num_train_epoch=1 --name=dwr_shape_ft --classify_rot --pred_voxels=True --use_context --save_visuals --visuals_freq=50 --eval_set=val --suncg_dl_debug_mode --max_eval_iter=20\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom absl import app\nfrom absl import flags\nimport os\nimport os.path as osp\nimport numpy as np\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport time\nimport scipy.misc\nimport pdb\nimport copy\nimport json\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport time\nimport random\nfrom ...data import nyu as nyu_data\nfrom ..suncg import evaluate_detection\nfrom ...utils import bbox_utils\nfrom ...utils import suncg_parse\nfrom ...utils import nyu_parse\nfrom ...nnutils import test_utils\nfrom ...nnutils import net_blocks\nfrom ...nnutils import loss_utils\nfrom ...nnutils import oc_net\nfrom ...nnutils import disp_net\nfrom ...utils import metrics\nfrom ...utils import visutil\nfrom ...renderer import utils as render_utils\nfrom ...utils import quatUtils\nimport cv2\nfrom ...utils import transformations\nfrom collections import Counter\nfrom six.moves import cPickle as pickle\nimport collections\n\nq2e = quatUtils.convert_quat_to_euler\n\ncurr_path = osp.dirname(osp.abspath(__file__))\ncache_path = osp.join(curr_path, '..', '..', 'cachedir')\nflags.DEFINE_string('rendering_dir', osp.join(cache_path, 'rendering'),\n 'Directory where intermittent renderings are saved')\n\nflags.DEFINE_integer('voxel_size', 32, 'Spatial dimension of shape voxels')\nflags.DEFINE_integer('n_voxel_layers', 5, 'Number of layers ')\nflags.DEFINE_integer('voxel_nc_max', 128, 'Max 3D channels')\nflags.DEFINE_integer('voxel_nc_l1', 8, 'Initial shape encder/decoder layer dimension')\nflags.DEFINE_float('voxel_eval_thresh', 0.25, 'Voxel evaluation threshold')\nflags.DEFINE_string('id', 'default', 'Plot string')\n\nflags.DEFINE_string('shape_pretrain_name', 'object_autoenc_32', 'Experiment name for pretrained shape encoder-decoder')\nflags.DEFINE_integer('shape_pretrain_epoch', 800, 'Experiment name for shape decoder')\n\nflags.DEFINE_integer('max_rois', 100, 'If we have more objects than this per image, we will subsample.')\nflags.DEFINE_integer('max_total_rois', 100, 'If we have more objects than this per batch, we will reject the batch.')\nflags.DEFINE_integer('num_visuals', 200, 'Number of renderings')\nflags.DEFINE_boolean('preload_stats', False, 'Reload the stats for the experiment')\nflags.DEFINE_string('layout_name', 'layout_pred', 'Experiment name for layout predictor')\nflags.DEFINE_integer('layout_train_epoch', 8, 'Experiment name for layout predictor')\nflags.DEFINE_boolean('use_gt_voxels', True, 'Use gt_voxels_for_prediction')\nflags.DEFINE_string('ovis_ids_filename', None, 'Ids to visualize output file')\nflags.DEFINE_string('ivis_ids_filename', None, 'Ids to visualize output file')\nflags.DEFINE_string('results_name', None, 'results_name')\nflags.DEFINE_boolean('gt_updates', False, 'Use gt_relative updates')\nflags.DEFINE_boolean('do_updates', True, 'Do opt updates')\nflags.DEFINE_string('index_file', None, 'file containing house names and view ids')\nflags.DEFINE_string('log_csv', None, 'file containing relative acc data')\nflags.DEFINE_boolean('draw_vis', False, 'Do not evaluate only draw 
visualization')\nflags.DEFINE_boolean('load_predictions_from_disk', False, 'Load pkl files')\nflags.DEFINE_boolean('save_predictions_to_disk', True, 'Save pkl files')\nflags.DEFINE_float('lambda_weight', 1.0, 'lambda for rotation')\nflags.DEFINE_float('split_size', 1.0, 'Split size of the train set')\nflags.DEFINE_boolean('only_pairs', True, 'Use only images with at least 2 objects')\nflags.DEFINE_boolean('dwr_model', False, 'Load a dwr model')\n\nFLAGS = flags.FLAGS\n\nEP_box_iou_thresh = [0.5, 0.5, 0.5, 0.5, 0., 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, ]\nEP_rot_delta_thresh = [30., 30., 400., 30., 30., 30., 400., 30., 400., 400., 400., 30, ]\nEP_trans_delta_thresh = [1., 1., 1., 1000., 1, 1., 1000., 1000., 1.0, 1000., 1000., 1000., ]\nEP_shape_iou_thresh = [0.25, 0, 0.25, 0.25, 0.25, 0.25, 0, 0, 0, 0.25, 0, 0.25, ]\nEP_scale_delta_thresh = [0.5, 0.5, 0.5, 0.5, 0.5, 100., 100., 100, 100, 100, 0.5, 100, ]\nEP_ap_str = ['all', '-shape', '-rot', '-trans', '-box2d', '-scale', 'box2d',\n             'box2d+rot', 'box2d+trans', 'box2d+shape', 'box2d+scale', 'box2d+rot+shape', ]\n\n\ndef my_print(tensor):\n    try:\n        print(np.round(tensor.numpy(), 2))\n    except Exception:\n        print(np.round(tensor, 2))\n    return\n\nclass DWRTester(test_utils.Tester):\n\n    def define_model(self):\n        '''\n        Define the pytorch net 'model' whose weights will be updated during training.\n        '''\n        self.eval_shape_iou = False\n        opts = self.opts\n        self.object_class2index = {'bed' : 1, 'sofa' : 2, 'table' : 3,\n                                   'chair' : 4, 'desk' : 5, 'television' : 6,\n                                   }\n\n        self.index2object_class = {1 : 'bed', 2 : 'sofa', 3 : 'table',\n                                   4 : 'chair', 5 : 'desk', 6 : 'television',\n                                   }\n\n        self.voxel_encoder, nc_enc_voxel = net_blocks.encoder3d(\n            opts.n_voxel_layers, nc_max=opts.voxel_nc_max, nc_l1=opts.voxel_nc_l1, nz_shape=opts.nz_shape)\n\n        self.voxel_decoder = net_blocks.decoder3d(\n            opts.n_voxel_layers, opts.nz_shape, nc_enc_voxel, nc_min=opts.voxel_nc_l1)\n\n        self.model = oc_net.OCNet(\n            (opts.img_height, opts.img_width), opts=self.opts,\n            roi_size=opts.roi_size,\n            use_context=opts.use_context, nz_feat=opts.nz_feat,\n            pred_voxels=False, nz_shape=opts.nz_shape, pred_labels=True, pred_graph=opts.pred_graph,\n            classify_rot=opts.classify_rot, nz_rot=opts.nz_rot, n_g_layers=opts.n_g_layers,)\n\n        if opts.pred_voxels and opts.dwr_model:\n            self.model.code_predictor.shape_predictor.add_voxel_decoder(\n                copy.deepcopy(self.voxel_decoder))\n\n        if opts.dwr_model:\n            # self.opts.num_train_epoch=1\n            self.model.add_label_predictor()\n            self.eval_shape_iou = True\n            opts.use_gt_voxels = False\n\n        self.load_network(self.model, 'pred', self.opts.num_train_epoch)\n\n        if not opts.dwr_model:\n            self.model.add_label_predictor()\n\n        self.model.eval()\n        self.model = self.model.cuda()\n        # self.model = self.model.cuda(device=self.opts.gpu_id)\n\n        if opts.pred_voxels and (not opts.dwr_model):\n            self.voxel_decoder = copy.deepcopy(self.model.code_predictor.shape_predictor.decoder)\n\n        self.layout_model = disp_net.dispnet()\n        network_dir = osp.join(opts.cache_dir, 'snapshots', opts.layout_name)\n        self.load_network(\n            self.layout_model, 'pred', opts.layout_train_epoch, network_dir=network_dir)\n        # self.layout_model.eval()\n        # self.layout_model = self.layout_model.cuda(device=self.opts.gpu_id)\n\n        return\n\n    def init_dataset(self):\n        opts = self.opts\n        self.resnet_transform = torchvision.transforms.Normalize(\n            mean=[0.485, 0.456, 0.406],\n            std=[0.229, 0.224, 0.225])\n        split_dir = osp.join(opts.nyu_dir, 'splits')\n        self.split = nyu_parse.get_split(split_dir, image_names=os.listdir(osp.join(opts.nyu_dir, 
'images')))\n # houses_splits = self.split[opts.eval_set]\n if opts.eval_set == 'train':\n rng = np.random.RandomState(10) \n rng.shuffle(self.split[opts.eval_set])\n len_splitset = int(len(self.split[opts.eval_set])*opts.split_size)\n self.split[opts.eval_set] = self.split[opts.eval_set][0:len_splitset]\n # print(self.split[opts.eval_set])\n\n self.dataloader = nyu_data.nyu_data_loader_benchmark(self.split[opts.eval_set], opts)\n\n if opts.voxel_size < 64:\n self.downsample_voxels = True\n self.downsampler = render_utils.Downsample(\n 64 // opts.voxel_size, use_max=True, batch_mode=True\n ).cuda()\n else:\n self.downsampler = None\n\n if opts.classify_rot:\n self.quat_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids.mat'))['medoids']).type(torch.FloatTensor)\n\n if not opts.pred_voxels:\n network_dir = osp.join(opts.cache_dir, 'snapshots', opts.shape_pretrain_name)\n self.load_network(\n self.voxel_decoder,\n 'decoder', opts.shape_pretrain_epoch, network_dir=network_dir)\n self.voxel_decoder.eval()\n self.voxel_decoder = self.voxel_decoder.cuda()\n\n self.spatial_image = Variable(nyu_data.define_spatial_image(opts.img_height_fine, opts.img_width_fine, 1.0/16).unsqueeze(0).cuda()) ## (1, 2, 30, 40)\n \n if opts.classify_rot:\n self.quat_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids.mat'))['medoids']).type(torch.FloatTensor)\n if opts.nz_rot == 48:\n self.quat_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids_48.mat'))['medoids']).type(torch.FloatTensor)\n\n nz_rel_rot = opts.nz_rel_rot\n self.quat_medoids_relative = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids_relative_{}_new.mat'.format(nz_rel_rot)))['medoids']).type(torch.FloatTensor)\n assert len(self.quat_medoids_relative) == opts.nz_rel_rot, ' Relative rotation architecture does not match'\n # self.quat_medoids_relative = torch.from_numpy(\n # scipy.io.loadmat(osp.join(opts.cache_dir, 'quat_medoids_relative.mat'))['medoids']).type(torch.FloatTensor)\n self.quat_medoids_var = None\n\n # define the nearest bin metric?\n # n_absoulte_bins = len(self.quat_medoids)\n # quatsA = self.quat_medoids.unsqueeze(1).expand(torch.Size(\n # [n_absoulte_bins, n_absoulte_bins, 4])).contiguous().view(-1, 4)\n # quatsB = self.quat_medoids.unsqueeze(0).expand(torch.Size(\n # [n_absoulte_bins, n_absoulte_bins, 4])).contiguous().view(-1, 4)\n # quatsA_conjugate = quatUtils.quat_conjugate(quatsA)\n # relative_quats = quatUtils.rotate_quat(quatsB, quatsA_conjugate)\n # self.relative_quats_binids = suncg_parse.quats_to_bininds(relative_quats, self.quat_medoids_relative)\n # self.relative_quats_binids = self.relative_quats_binids.view(n_absoulte_bins, n_absoulte_bins)\n if opts.classify_dir:\n self.direction_medoids = torch.from_numpy(\n scipy.io.loadmat(osp.join(opts.cache_dir, 'direction_medoids_relative_{}_new.mat'.format(opts.nz_rel_dir)))['medoids']).type(torch.FloatTensor)\n self.direction_medoids = torch.nn.functional.normalize(self.direction_medoids)\n\n self.data_vis = []\n self.stored_quat_relative_gt_classes = []\n self.stored_quat_relative_pred_classes = []\n self.rotation_bins = []\n self.translation = []\n self.pred_translation = []\n self.pred_rotation = []\n self.pred_relative_directions = []\n self.relative_directions =[]\n return\n\n def decode_shape(self, pred_shape):\n opts = self.opts\n if opts.use_gt_voxels:\n # assert pred_shape.size() == self.codes_gt[0].size(), 'predict size from gt 
incorrect'\n return self.codes_gt['shape'].clone()\n\n pred_shape = torch.nn.functional.sigmoid(self.voxel_decoder.forward(pred_shape))\n return pred_shape\n\n def decode_rotation(self, pred_rot):\n opts = self.opts\n if opts.classify_rot:\n _, bin_inds = torch.max(pred_rot.data.cpu(), 1)\n pred_rot = Variable(suncg_parse.bininds_to_quats(\n bin_inds, self.quat_medoids), requires_grad=False)\n return pred_rot\n\n def decode_rotation_topk(self, pred_rot):\n opts = self.opts\n if opts.classify_rot:\n _, bin_inds = torch.topk(pred_rot.data.cpu(), k=2, dim=1)\n bin_inds = bin_inds.view(-1, 1)\n pred_rot = Variable(suncg_parse.bininds_to_quats(\n bin_inds, self.quat_medoids), requires_grad=False)\n pred_rot = pred_rot.view(-1, 2, 4)\n return pred_rot\n\n def get_class_indices(self, pred_rot):\n opts = self.opts\n _, bin_inds = torch.max(pred_rot.data.cpu(), 1)\n return bin_inds\n\n def decode_rotation_relative(self, pred_rot):\n opts = self.opts\n if opts.classify_rot:\n _, bin_inds = torch.max(pred_rot.data.cpu(), 1)\n pred_rot = Variable(suncg_parse.bininds_to_quats(\n bin_inds, self.quat_medoids_relative), requires_grad=False)\n return pred_rot\n\n def decode_class(self, pred_class):\n opts = self.opts\n # pdb.set_trace()\n _, bin_inds = torch.max(pred_class.data.cpu(), 1)\n return bin_inds\n\n def count_number_pairs(self, rois):\n counts = Counter(rois[:,0].numpy().tolist())\n pairs = sum([v*v for (k,v) in counts.items() if v > 1])\n return pairs\n\n def set_input(self, batch):\n opts = self.opts\n if batch is None or not batch:\n self.invalid_batch = True\n self.invalid_rois = None\n return\n\n if batch['empty']:\n self.invalid_rois = None\n self.invalid_batch = True\n return\n\n bboxes_gt = suncg_parse.bboxes_to_rois(batch['bboxes'])\n # bboxes_proposals = suncg_parse.bboxes_to_rois(batch['bboxes_test_proposals'])\n bboxes_proposals = bboxes_gt\n rois = bboxes_proposals\n if rois.numel() <= 0 or bboxes_gt.numel() <= 0: # some proposals and gt objects should be there\n self.invalid_batch = True\n self.invalid_rois = None\n return\n else:\n if bboxes_gt.numel() == 5 and self.opts.only_pairs: \n self.invalid_rois = None\n self.invalid_batch = True\n return\n # if bboxes_gt.numel() > 8 * 5:\n # self.invalid_batch = True\n # return\n pairs = self.count_number_pairs(rois)\n # if pairs <= 1:\n # self.invalid_batch = True\n # self.invalid_rois = None\n # return\n self.invalid_batch = False\n\n self.image_names = batch['image_name']\n # Inputs for prediction\n if self.opts.load_predictions_from_disk:\n return\n\n\n\n input_imgs_fine = batch['img_fine'].type(torch.FloatTensor)\n input_imgs = batch['img'].type(torch.FloatTensor)\n\n self.input_imgs_layout = Variable(\n input_imgs.cuda(), requires_grad=False)\n\n for b in range(input_imgs_fine.size(0)):\n input_imgs_fine[b] = self.resnet_transform(input_imgs_fine[b])\n input_imgs[b] = self.resnet_transform(input_imgs[b])\n\n self.input_imgs = Variable(\n input_imgs.cuda(), requires_grad=False)\n\n self.input_imgs_fine = Variable(\n input_imgs_fine.cuda(), requires_grad=False)\n\n self.rois = Variable(\n rois.type(torch.FloatTensor).cuda(), requires_grad=False)\n\n\n code_tensors = suncg_parse.collate_codes(batch['codes'])\n code_tensors_quats = code_tensors['quat']\n self.amodal_bboxes = code_tensors['amodal_bbox']\n object_classes = code_tensors['class'].type(torch.LongTensor)\n self.class_gt = self.object_classes = Variable(object_classes.cuda(), requires_grad=False)\n \n self.object_locations = suncg_parse.batchify(code_tensors['trans'], 
self.rois[:, 0].data.cpu())\n code_tensors['shape'] = code_tensors['shape'].unsqueeze(1) # unsqueeze voxels\n \n vox2cams = code_tensors['transform_vox2cam']\n self.vox2cams = vox2cams = suncg_parse.batchify(vox2cams, self.rois[:,0].data.cpu())\n \n cam2voxs = code_tensors['transform_cam2vox']\n cam2voxs = suncg_parse.batchify(cam2voxs, self.rois[:,0].data.cpu())\n \n relative_direction_rotation = []\n relative_dir_mask = []\n for bx in range(len(cam2voxs)):\n nx = len(cam2voxs[bx])\n mask = torch.ones([nx*nx])\n for ix in range(nx):\n for jx in range(nx):\n if ix == jx:\n mask[ix*nx + jx] = 0\n directions = []\n for cam2vox in cam2voxs[bx][ix]:\n dt_trj_cam_frame = self.object_locations[bx][jx]\n direction = self.homogenize_coordinates(dt_trj_cam_frame.unsqueeze(0))\n direction = torch.matmul(cam2vox, direction.t()).t()[:,0:3]\n direction = direction/(torch.norm(direction,p=2, dim=1, keepdim=True) + 1E-5)\n directions.append(direction)\n directions = torch.cat(directions)\n relative_direction_rotation.append(directions)\n relative_dir_mask.append(mask)\n self.relative_dir_mask = Variable(torch.cat(relative_dir_mask,dim=0).byte()).cuda()\n\n assert self.opts.batch_size == 1, 'batch size > 1 not supported'\n\n if opts.classify_dir and not opts.gmm_dir:\n self.relative_direction_rotation = relative_direction_rotation\n relative_direction_rotation_binned = [suncg_parse.directions_to_bininds(t, self.direction_medoids) for t in relative_direction_rotation]\n # relative_direction_rotation_directions = [suncg_parse.bininds_to_directions(t, self.direction_medoids) for t in relative_direction_rotation_binned]\n # ## compute some average error?\n # error = torch.cat([(1 - t1*t2.sum(1)).mean() for (t1, t2) in zip(relative_direction_rotation, relative_direction_rotation_directions)]).mean()\n # # pdb.set_trace()\n self.relative_direction_rotation_binned = [Variable(t).cuda() for t in relative_direction_rotation_binned]\n\n \n self.object_locations = [Variable(t_.cuda(), requires_grad=False) for t_ in\n self.object_locations]\n\n self.object_scales = suncg_parse.batchify(code_tensors['scale'] + 1E-10, self.rois[:, 0].data.cpu())\n self.object_scales = [Variable(t_.cuda(), requires_grad=False) for t_ in\n self.object_scales]\n\n self.relative_trans_gt = []\n self.relative_scale_gt = []\n for bx in range(len(self.object_locations)):\n relative_locations = self.object_locations[bx].unsqueeze(0) - self.object_locations[bx].unsqueeze(1)\n relative_locations = relative_locations.view(-1, 3)\n self.relative_trans_gt.append(relative_locations)\n # this is in log scale.\n relative_scales = self.object_scales[bx].unsqueeze(0).log() - self.object_scales[bx].unsqueeze(1).log()\n relative_scales = relative_scales.view(-1, 3)\n self.relative_scale_gt.append(relative_scales)\n\n self.relative_scale_gt = torch.cat(self.relative_scale_gt, dim=0) # this is in log scale\n self.relative_trans_gt = torch.cat(self.relative_trans_gt, dim=0)\n self.relative_gt = {'relative_trans' : self.relative_trans_gt,\n 'relative_scale' : self.relative_scale_gt,\n 'relative_dir' : self.relative_direction_rotation,\n 'relative_mask' : self.relative_dir_mask,\n }\n \n # self.layout_gt=Variable(\n # batch['layout'].cuda(), requires_grad=False)\n\n self.codes_gt_quats = [\n Variable(t.cuda(), requires_grad=False) for t in code_tensors_quats]\n codes_gt_keys = ['shape', 'scale', 'trans']\n self.codes_gt ={key : Variable(code_tensors[key].cuda(), requires_grad=False) \n for key in codes_gt_keys}\n self.codes_gt['quat'] = self.codes_gt_quats\n\n 
self.rois_gt=Variable(\n bboxes_gt.type(torch.FloatTensor).cuda(), requires_grad=False)\n if self.downsample_voxels:\n self.codes_gt['shape']=self.downsampler.forward(self.codes_gt['shape'])\n return\n\n def convert_multiple_bins_to_probabilites(self, bins_ids, num_medoids, no_noise=1.0):\n \n bins = [torch.LongTensor([random.choice(c.data)]) for c in bins_ids]\n noise_values = torch.bernoulli(torch.FloatTensor(len(bins)).zero_() + no_noise)\n bins = [c if n > 0.5 else torch.LongTensor([np.random.randint(num_medoids)]) for c, n in zip(bins, noise_values)]\n bins = torch.cat(bins)\n probs = torch.FloatTensor(len(bins), num_medoids).zero_()\n probs.scatter_(1, bins.unsqueeze(1), 1-0.001*num_medoids)\n probs = probs + 0.001\n return probs\n\n '''\n args\n relative_directions : list N^2, torch.Tensor K x 3\n vox2cams : list N , K x 4,4\n img_size : (H, W)\n returns:\n relative_directions in image plane N^2 K x 2 x 2\n '''\n def convert_relative_vectors_to_image_plane(self, relative_directions, vox2cams, img_size):\n def convert_vector_to_image_plane(vector, vox2cam, cam_intrinsic, img_size):\n vector_cam_frame = suncg_parse.transform_coordinates(vox2cam, vector.reshape(1, -1))\n img_frame = suncg_parse.transform_to_image_coordinates(vector_cam_frame, cam_intrinsic)\n img_frame = np.clip(img_frame, a_min=np.array([[0,0]]), a_max=np.array([[img_size[1], img_size[0]]]))\n return img_frame\n\n cam_intrinsic = suncg_parse.cam_intrinsic()\n img_vectors = []\n n_objects = len(vox2cams)\n for ix, rel_dir in enumerate(relative_directions):\n rel_dir = rel_dir[0]\n vox2cam = vox2cams[ix//n_objects][0]\n src_vector = convert_vector_to_image_plane(np.array([0,0,0]), vox2cam.numpy(), cam_intrinsic, img_size)\n trj_vector = convert_vector_to_image_plane(rel_dir.numpy(), vox2cam.numpy(), cam_intrinsic, img_size)\n img_vectors.append(np.concatenate([src_vector, trj_vector], axis=0))\n\n index = [ix*n_objects + ix for ix in range(n_objects)]\n img_vectors = [(img_vectors[ix], ix//n_objects, ix % n_objects) for ix in range(n_objects*n_objects) if ix not in index]\n return img_vectors\n\n # def save_current_stats(self, bench):\n # imgs_dir=osp.join(self.opts.results_vis_dir, 'vis_iter_{}'.format(self.vis_iter))\n # if not os.path.exists(imgs_dir):\n # os.makedirs(imgs_dir)\n # json_file=os.path.join(imgs_dir, 'bench_iter_{}.json'.format(0))\n # # print(json_file)\n # with open(json_file, 'w') as f:\n # json.dump({'bench': bench}, f)\n\n def save_layout_mesh(self, mesh_dir, layout, prefix='layout'):\n opts=self.opts\n layout_vis=layout.data[0].cpu().numpy().transpose((1, 2, 0))\n mesh_file=osp.join(mesh_dir, prefix + '.obj')\n vs, fs=render_utils.dispmap_to_mesh(\n layout_vis,\n suncg_parse.cam_intrinsic(),\n scale_x=self.opts.layout_width / 640,\n scale_y=self.opts.layout_height / 480\n )\n fout=open(mesh_file, 'w')\n mesh_file=osp.join(mesh_dir, prefix + '.obj')\n fout=open(mesh_file, 'w')\n render_utils.append_obj(fout, vs, fs)\n fout.close()\n\n def save_codes_mesh(self, mesh_dir, code_vars, prefix='codes'):\n opts=self.opts\n n_rois=code_vars['shape'].size()[0]\n code_list=suncg_parse.uncollate_codes(code_vars, self.input_imgs.data.size(0), torch.Tensor(n_rois).fill_(0))\n\n if not os.path.exists(mesh_dir):\n os.makedirs(mesh_dir)\n mesh_file=osp.join(mesh_dir, prefix + '.obj')\n new_codes_list = suncg_parse.convert_codes_list_to_old_format(code_list[0])\n render_utils.save_parse(mesh_file, new_codes_list, save_objectwise=False, thresh=0.1)\n\n def render_visuals(self, mesh_dir, obj_name=None):\n 
png_dir=osp.join(mesh_dir, 'rendering')\n if obj_name is not None:\n render_utils.render_mesh(osp.join(mesh_dir, obj_name + '.obj'), png_dir)\n im_view1=scipy.misc.imread(osp.join(png_dir, '{}_render_000.png'.format(obj_name)))\n # im_view2=scipy.misc.imread(osp.join(png_dir, '{}_render_003.png'.format(obj_name)))\n else:\n render_utils.render_directory(mesh_dir, png_dir)\n im_view1=scipy.misc.imread(osp.join(png_dir, 'render_000.png'))\n # im_view2=scipy.misc.imread(osp.join(png_dir, 'render_003.png'))\n\n # return im_view1, im_view2\n return im_view1\n\n\n\n def get_current_visuals(self):\n visuals={}\n opts=self.opts\n visuals['img']=visutil.tensor2im(visutil.undo_resnet_preprocess(\n self.input_imgs_fine.data))\n rois=self.rois.data\n visuals['img_roi']=render_utils.vis_detections(visuals['img'], rois[:, 1:])\n\n # img_rel_vectors_pred = self.convert_relative_vectors_to_image_plane([[x] for x in self.relative_direction_prediction_3d], \n # self.vox2cams[0], (self.opts.img_height_fine, self.opts.img_width_fine))\n # img_rel_vectors_gt = self.convert_relative_vectors_to_image_plane(self.relative_direction_rotation,\n # self.vox2cams[0], (self.opts.img_height_fine, self.opts.img_width_fine))\n # visuals['img_rel_dir_pred']=render_utils.vis_relative_dirs(visuals['img_roi'], img_rel_vectors_pred)\n # visuals['img_rel_dir_gt']=render_utils.vis_relative_dirs(visuals['img_roi'], img_rel_vectors_gt)\n\n \n # mesh_dir=osp.join(opts.rendering_dir, opts.name)\n # return visuals\n\n mesh_dir=osp.join(opts.rendering_dir)\n # vis_codes=[self.codes_pred_vis, self.codes_gt]\n vis_codes=[self.codes_pred_eval, self.codes_gt]\n # vis_codes=[self.codes_gt]\n # vis_layouts = [self.layout_pred, self.layout_gt]\n vis_names=['b_pred', 'c_gt']\n # vis_names=['c_gt']\n # vis_names=['b_pred']\n for vx, v_name in enumerate(vis_names):\n os.system('rm {}/*.obj'.format(mesh_dir))\n self.save_codes_mesh(mesh_dir, vis_codes[vx])\n # self.save_layout_mesh(mesh_dir, vis_layouts[vx])\n\n # visuals['{}_layout_cam_view'.format(v_name)], visuals['{}_layout_novel_view'.format(v_name)] = self.render_visuals(\n # mesh_dir, obj_name='layout')\n # visuals['{}_objects_cam_view'.format(v_name)], visuals['{}_objects_novel_view'.format(v_name)]=self.render_visuals(\n # mesh_dir, obj_name='codes')\n # visuals['{}_scene_cam_view'.format(v_name)], visuals['{}_scene_novel_view'.format(v_name)]=self.render_visuals(\n # mesh_dir)\n visuals['{}_objects_cam_view'.format(v_name)] =self.render_visuals(mesh_dir, obj_name='codes')\n # visuals['{}_scene_cam_view'.format(v_name)] =self.render_visuals(mesh_dir)\n return visuals\n\n\n def filter_pos(self, codes, pos_inds):\n pos_inds=torch.from_numpy(np.array(pos_inds)).squeeze()\n t = torch.LongTensor\n\n if type(codes) == dict:\n key = 'shape'\n if isinstance(codes[key], torch.autograd.Variable):\n if isinstance(codes[key].data, torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n elif isinstance(codes[key], torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n\n\n pos_inds=torch.autograd.Variable(\n pos_inds.type(t), requires_grad=False)\n filtered_codes= {k : torch.index_select(code, 0, pos_inds) for k, code in codes.items()}\n\n else:\n if isinstance(codes[0], torch.autograd.Variable):\n if isinstance(codes[0].data, torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n elif isinstance(codes[0], torch.cuda.FloatTensor):\n t = torch.cuda.LongTensor\n\n pos_inds =torch.autograd.Variable(\n pos_inds.type(t), requires_grad=False)\n filtered_codes = [torch.index_select(code, 0, pos_inds) for 
code in codes]\n        return filtered_codes\n\n\n    def compute_entropy(self, log_probs):\n        return np.sum(-1*np.exp(log_probs)*log_probs, axis=-1)\n\n    def update_locations(self, trans_location, relative_locations):\n        n_objects = trans_location.size(0)\n        # lmbda = min(n_objects*n_objects, 5)\n        # lmbda = max(n_objects, 5)\n        lmbda = 1.0\n        relative_locations = relative_locations.numpy()\n        trans_location = trans_location.numpy()\n        A = np.zeros((n_objects * n_objects, n_objects))\n        b = np.zeros((n_objects * n_objects, 3))\n        index = 0\n        for i in range(n_objects):\n            for j in range(n_objects):\n                if i == j:\n                    continue\n                # don't add the constraint if it is farther than a particular distance\n                dist = np.linalg.norm(relative_locations[i * n_objects + j])\n                if dist < 10:\n                    A[index][i] = -1\n                    A[index][j] = 1\n                    b[index] = relative_locations[i * n_objects + j]\n                    index += 1\n        for i in range(n_objects):\n            A[index][i] = lmbda * 1\n            b[index] = lmbda * trans_location[i]\n            index += 1\n        A = A[0:index]\n        b = b[0:index]\n        new_location = np.linalg.lstsq(A, b)\n        return torch.from_numpy(new_location[0]), np.linalg.norm(new_location[0] - trans_location, axis=1).tolist()\n        # return torch.from_numpy(trans_location)\n\n\n    def save_predictions_to_pkl(self, dict_of_outputs):\n        pkl_file_name = osp.join(self.opts.results_eval_dir, \"{}_{}.pkl\".format(self.house_names[0], self.view_ids[0]))\n\n        def recursive_convert_to_numpy(elem):\n            if isinstance(elem, collections.Mapping):\n                return {key: recursive_convert_to_numpy(elem[key]) for key in elem}\n            elif isinstance(elem, str):\n                return elem\n            elif isinstance(elem, collections.Sequence):\n                return [recursive_convert_to_numpy(samples) for samples in elem]\n            elif isinstance(elem, torch.FloatTensor):\n                return elem.numpy()\n            elif isinstance(elem, torch.cuda.FloatTensor):\n                return elem.cpu().numpy()\n            elif isinstance(elem, torch.LongTensor):\n                return elem.numpy()\n            elif isinstance(elem, torch.cuda.LongTensor):\n                return elem.cpu().numpy()\n            elif isinstance(elem, torch.autograd.Variable):\n                return recursive_convert_to_numpy(elem.data)\n            else:\n                return elem\n\n        new_dict = recursive_convert_to_numpy(dict_of_outputs)\n        with open(pkl_file_name, 'wb') as f:\n            pickle.dump(new_dict, f)\n\n    def convert_pkl_to_predictions(self):\n        pkl_file_name = osp.join(self.opts.results_eval_dir, \"{}_{}.pkl\".format(self.house_names[0], self.view_ids[0]))\n        def recursive_convert_to_torch(elem):\n            if isinstance(elem, collections.Mapping):\n                return {key: recursive_convert_to_torch(elem[key]) for key in elem}\n            elif isinstance(elem, str):\n                return elem\n            elif isinstance(elem, collections.Sequence):\n                return [recursive_convert_to_torch(samples) for samples in elem]\n            elif isinstance(elem, np.ndarray):\n                if elem.dtype == np.int32:\n                    return torch.from_numpy(elem).long()\n                else:\n                    return torch.from_numpy(elem).float()\n            else:\n                return elem\n        with open(pkl_file_name, 'rb') as f:\n            predictions = pickle.load(f)\n        predictions = recursive_convert_to_torch(predictions)\n        predictions['gt_codes'] = [Variable(k) for k in predictions['gt_codes']]\n        predictions['pred_codes'] = [Variable(k) for k in predictions['pred_codes']]\n        predictions['object_class_gt'] = Variable(predictions['object_class_gt']).long()\n        predictions['rois'] = Variable(predictions['rois'])\n        predictions['amodal_bboxes'] = predictions['amodal_bboxes']\n        predictions['codes_gt_quats'] = [Variable(t) for t in predictions['codes_gt_quats']]\n\n        try:\n            predictions['relative_trans'] = 
Variable(predictions['relative_trans'])\n predictions['relative_scale'] = Variable(predictions['relative_scale'])\n predictions['relative_dir'] = Variable(predictions['relative_dir'])\n predictions['relative_gt'] = [Variable(k) for k in predictions['relative_gt']]\n predictions['trans_dependent_rotation'] = [k for k in predictions['trans_dependent_rotation']]\n predictions['trans_dependent_rotation_binned'] = [Variable(k).long() for k in predictions['trans_dependent_rotation_binned']]\n predictions['relative_quat_tensors_angles_gt'] = predictions['relative_quat_tensors_angles_gt']\n \n except KeyError as e:\n assert self.opts.pred_relative == False, 'relative outputs required'\n predictions['relative_trans'] = predictions['relative_scale'] = predictions['relative_quat'] = None\n predictions['relative_dir'] = predictions['relative_gt'] = predictions['trans_dependent_rotation'] = None\n predictions['trans_dependent_rotation_binned'] = predictions['relative_quat_tensors_angles_gt'] = None\n\n try:\n predictions['codes_quat_var'] = Variable(predictions['codes_quat_var'])\n except KeyError as e:\n assert self.opts.var_gmm_rot == False, 'var gmm rots given'\n predictions['codes_quat_var'] = None\n\n return predictions\n\n\n def predict(self):\n # pdb.set_trace()\n # codes_pred_all, trj_pred_all, labels_pred = self.model.forward((self.input_imgs_fine, self.input_imgs, self.rois))\n if not self.opts.load_predictions_from_disk:\n feed_dict = {}\n feed_dict['imgs_inp_fine'] = self.input_imgs_fine\n feed_dict['imgs_inp_coarse'] = self.input_imgs\n feed_dict['rois_inp'] = self.rois\n feed_dict['location_inp'] = self.object_locations\n feed_dict['class_inp'] = self.object_classes\n feed_dict['spatial_image'] = self.spatial_image\n\n model_pred , _ =self.model.forward(feed_dict)\n\n # pdb.set_trace()\n # codes_pred_all, labels_pred = model_pred[0], model_pred[-1]\n codes_pred_all = model_pred['codes_pred']\n if self.opts.gmm_rot and self.opts.var_gmm_rot:\n self.codes_quat_var = codes_pred_all['quat'][1].data.cpu().numpy()\n codes_pred_all['quat'] = torch.nn.functional.log_softmax(codes_pred_all['quat'][0])\n else:\n codes_pred_all['quat'] = torch.nn.functional.log_softmax(codes_pred_all['quat'])\n \n stuff_to_save = {'gt_codes' : self.codes_gt,\n 'pred_codes' : codes_pred_all, \n 'object_class_gt' : self.object_classes,\n 'rois' : self.rois, \n 'index2object' : self.index2object_class,\n 'amodal_bboxes' : self.amodal_bboxes,\n 'codes_gt_quats' : self.codes_gt_quats,}\n if self.opts.gmm_rot and self.opts.var_gmm_rot:\n stuff_to_save['codes_quat_var'] = self.codes_quat_var\n\n\n if self.opts.pred_relative:\n self.relative_predictions = model_pred['codes_relative']\n self.relative_trans_predictions = self.relative_predictions['relative_trans']\n self.relative_scale_predictions = self.relative_predictions['relative_scale']\n self.relative_direction_prediction = self.relative_predictions['relative_dir']\n\n if self.opts.gmm_dir:\n self.relative_direction_prediction = self.relative_direction_prediction[0]\n\n self.relative_direction_prediction = torch.nn.functional.log_softmax(self.relative_direction_prediction)\n stuff_to_save['relative_trans' ] = self.relative_trans_predictions\n stuff_to_save['relative_scale' ] = self.relative_scale_predictions\n stuff_to_save['relative_dir' ] = self.relative_direction_prediction\n stuff_to_save['relative_gt' ] = self.relative_gt\n stuff_to_save['trans_dependent_rotation' ] = self.relative_direction_rotation\n stuff_to_save['trans_dependent_rotation_binned' ] = 
self.relative_direction_rotation_binned\n if self.opts.pred_class:\n self.class_pred=model_pred['class_pred']\n # self.class_pred=model_pred['class_pred']\n labels_pred=model_pred['labels_pred']\n \n if self.opts.save_predictions_to_disk:\n self.save_predictions_to_pkl(stuff_to_save)\n assert osp.exists(osp.join(self.opts.results_eval_dir, \"{}_{}.pkl\".format(self.house_names[0], self.view_ids[0]))), 'pkl file does not exist'\n\n else:\n # if self.house_names[0] == '4c5edfb056c1f38d58482a05562d8c1d':\n # pdb.set_trace()\n predictions = self.convert_pkl_to_predictions()\n self.codes_gt = tuple(predictions['gt_codes'])\n codes_pred_all = tuple(predictions['pred_codes'])\n\n self.relative_trans_predictions = predictions['relative_trans']\n self.relative_scale_predictions = predictions['relative_scale']\n self.relative_quat_predictions = predictions['relative_quat' ]\n self.relative_direction_prediction = predictions['relative_dir'] \n self.relative_predictions = [self.relative_trans_predictions, self.relative_scale_predictions, self.relative_quat_predictions, self.relative_direction_prediction]\n self.relative_gt = predictions['relative_gt']\n self.relative_direction_rotation = predictions['trans_dependent_rotation']\n self.relative_direction_rotation_binned = predictions['trans_dependent_rotation_binned']\n self.object_classes = predictions['object_class_gt']\n self.rois = predictions['rois']\n self.index2object_class = predictions['index2object']\n self.amodal_bboxes = predictions['amodal_bboxes']\n self.relative_quat_tensors_angles_gt = predictions['relative_quat_tensors_angles_gt']\n self.codes_gt_quats = predictions['codes_gt_quats']\n self.codes_quat_var = predictions['codes_quat_var']\n\n n = codes_pred_all['shape'].size(0)\n labels_pred = Variable(torch.zeros(n, 1).cuda())\n scores_pred = labels_pred.cpu().data.numpy() * 0 + 1\n bboxes_pred = self.rois.data.cpu().numpy()[:, 1:]\n min_score_eval=np.minimum(0.05, np.max(scores_pred))\n # pos_inds_eval = metrics.nms(\n # np.concatenate((bboxes_pred, scores_pred), axis=1),\n # 0.3, min_score=min_score_eval)\n \n pos_inds_eval=[i for i in range(n)]\n\n self.codes_pred_eval=self.filter_pos(codes_pred_all, pos_inds_eval)\n # pdb.set_trace()\n\n opts=self.opts\n # updates for translation\n if opts.pred_relative:\n for i in range(1):\n if not opts.gt_updates:\n new_trans, self.update_norm=self.update_locations(\n self.codes_pred_eval['trans'].data.cpu(), self.relative_trans_predictions.data.cpu())\n else:\n new_trans, self.update_norm=self.update_locations(\n self.codes_pred_eval['trans'].data.cpu(), self.relative_gt['relative_trans'].data.cpu())\n self.new_trans=new_trans\n if opts.do_updates:\n self.codes_pred_eval['trans']=Variable(new_trans)\n else:\n self.update_norm=torch.mean(self.codes_pred_eval['trans'] * 0, dim=1).data.cpu().numpy().tolist()\n\n if opts.pred_relative:\n for i in range(1):\n if not opts.gt_updates:\n new_scale, _ =self.update_locations(\n self.codes_pred_eval['scale'].data.cpu().log(), self.relative_predictions['relative_scale'].data.cpu())\n else:\n new_scale, _ =self.update_locations(\n self.codes_pred_eval['scale'].data.cpu().log(), self.relative_gt['relative_scale'].data.cpu())\n \n new_scale=new_scale.exp()\n self.new_scale=new_scale\n if opts.do_updates:\n self.codes_pred_eval['scale']=Variable(new_scale)\n\n quats_gt_binned = [suncg_parse.quats_to_bininds(q.data.cpu(), self.quat_medoids) for q in self.codes_gt['quat']]\n quats_gt_binned = [Variable(q) for q in quats_gt_binned]\n quats_gt_binned_probs = 
Variable(self.convert_multiple_bins_to_probabilites(quats_gt_binned, opts.nz_rot, no_noise=1.0)).log()\n \n self.codes_pred_quat_before = Variable(self.codes_pred_eval['quat'].data.clone())\n self.entropy_before_optim = (-1 * self.codes_pred_eval['quat'] * self.codes_pred_eval['quat'].exp()).sum(1).data.cpu().numpy()\n\n if opts.pred_relative and opts.classify_dir and opts.do_updates:\n if opts.gt_updates:\n relative_direction_prediction = self.convert_multiple_bins_to_probabilites(self.relative_direction_rotation_binned, opts.nz_rel_dir).log().numpy()\n self.relative_direction_prediction = Variable(torch.from_numpy(relative_direction_prediction).cuda())\n else:\n relative_direction_prediction = self.relative_direction_prediction.data.cpu().numpy()\n self.relative_direction_prediction_3d = relative_direction_prediction\n\n absolute_locations = self.codes_pred_eval['trans'].data.cpu().numpy()\n # absolute_locations = self.codes_gt[3].data.cpu().numpy()\n absolute_log_probabilites = self.codes_pred_eval['quat'].data.cpu().numpy()\n n_objects = len(absolute_log_probabilites)\n n_absoulte_bins = absolute_log_probabilites.shape[1]\n relative_direction_prediction = relative_direction_prediction.reshape(n_objects, n_objects, -1)\n n_relative_bins = relative_direction_prediction.shape[2]\n bin_scores = np.zeros((n_objects, n_objects, n_absoulte_bins))\n quat_medoids = self.quat_medoids.numpy()\n direction_medoids = self.direction_medoids.numpy()\n new_probability = absolute_log_probabilites\n # lambda_weight = opts.lambda_weight \n # lambda_weight = opts.lambda_weight * 1./np.sqrt(n_objects)\n lambda_weight = opts.lambda_weight * 1./n_objects\n adaptive_weight = np.ones(n_objects)\n # pdb.set_trace()\n\n for nx in range(n_objects):\n src_c = self.index2object_class[self.object_classes.data[nx, 0]]\n ignore_bin_scores = False\n # if src_c == 'table':\n # ignore_bin_scores = True\n # continue\n for mx in range(n_objects):\n if mx == nx:\n continue\n expected_direction = absolute_locations[mx] - absolute_locations[nx] ## make it unit norm\n dist = (1E-5 + np.linalg.norm(expected_direction))\n if dist > 4:\n continue\n\n expected_direction = expected_direction/ (1E-5 + np.linalg.norm(expected_direction))\n expected_direction = expected_direction.reshape(1, -1)\n alignment_scores = []\n indices = []\n entropy = -1*np.sum(np.exp(relative_direction_prediction[nx, mx]) * relative_direction_prediction[nx, mx])\n # if entropy > 2:\n # continue\n # adaptive_weight[nx] += 1\n # pdb.set_trace()\n for abinx in range(n_absoulte_bins):\n prob_bin = absolute_log_probabilites[nx][abinx]\n quaternion_abinx = quat_medoids[abinx]\n rotation = transformations.quaternion_matrix(quaternion_abinx)\n transform = rotation.copy()\n transform[0:3, 3] = np.array(absolute_locations[nx], copy=True)\n \n # translation = suncg_parse.trans_transform(absolute_locations[nx])\n # transform = np.matmul(rotation, translation)\n \n relative_direction = direction_medoids\n predicted_direction = suncg_parse.transform_coordinates(transform, relative_direction) -absolute_locations[nx].reshape(1, -1)\n # # log_alignment_score = (1 - np.matmul(expected_direction, predicted_direction.transpose()).squeeze()) #* relative_direction_prediction[nx, mx]\n # alignment_score = np.matmul(expected_direction, predicted_direction.transpose()).squeeze()\n # alignment_score = (alignment_score > 0.95) * alignment_score\n # alignment_score = alignment_score * np.exp(relative_direction_prediction[nx, mx])\n # alignment_score = np.log(np.sum(alignment_score) + 
1E-5)\n # pdb.set_trace()\n\n alignment_score = (1 - np.matmul(expected_direction, predicted_direction.transpose()).squeeze())\n index = np.argmin(alignment_score, axis=0)\n alignment_score = np.min(alignment_score, axis=0) + relative_direction_prediction[nx, mx, index]# absolute_log_probabilites[nx][abinx]\n alignment_score = np.min(relative_direction_prediction[nx, mx, index])\n alignment_scores.append(alignment_score)\n # indices.append(index)\n \n\n temp = np.array([metrics.quat_dist(quat_medoids[0], quat_medoids[k]) for k in range(0,24)]).round(2)\n alignment_scores = np.exp(np.array(alignment_scores))\n alignment_scores = np.log(alignment_scores/np.sum(alignment_scores) + 1E-10)\n bin_scores[nx,mx,:] = alignment_scores\n bin_scores = np.sum(bin_scores, axis=1)\n bin_scores = np.exp(bin_scores)\n bin_scores = np.log(1E-10 + bin_scores/np.sum(bin_scores, 1, keepdims=True))\n if ignore_bin_scores == True:\n bin_scores = bin_scores * 0\n \n # pdb.set_trace()\n abs_ent = self.compute_entropy(new_probability).reshape(-1, 1)\n rel_ent = self.compute_entropy(bin_scores).reshape(-1, 1)\n # pdb.set_trace()\n new_probability = 1.0 * new_probability + np.minimum(lambda_weight, 1.0)*(1*bin_scores) + 0.00 * quats_gt_binned_probs.data.cpu().numpy()\n new_probability = torch.from_numpy(new_probability).float()\n new_probability = torch.nn.functional.normalize(new_probability.exp(),1)\n self.codes_pred_eval['quat'] = Variable(new_probability.cuda())\n self.entropy_after_optim = (-1 * self.codes_pred_eval['quat'] * (self.codes_pred_eval['quat'] + 1E-10).log()).sum(1).data.cpu().numpy()\n\n\n\n\n self.rois_pos_eval=self.filter_pos([self.rois], pos_inds_eval)[0] # b x 5, 1:5 is box (x1 y1 x2 y2)\n self.codes_pred_eval['shape']=self.decode_shape(self.codes_pred_eval['shape']) # b x 1 x 32 x 32 x 32\n\n # if self.opts.gmm_rot:\n # self.codes_pred_eval[2] = self.decode_rotation_slerp(self.codes_pred_eval[2], \n # else:\n self.codes_pred_eval['quat']=self.decode_rotation(self.codes_pred_eval['quat']) # b x 4\n self.codes_pred_quat_before = self.decode_rotation(self.codes_pred_quat_before)\n # self.codes_pred_eval[2]=self.decode_rotation_topk(self.codes_pred_eval[2]) # b x 4\n \n # self.codes_pred_eval[2] = suncg_parse.quats_to_bininds(self.codes_gt[2].data.cpu(), self.quat_medoids)\n # self.codes_pred_eval[2] = Variable(suncg_parse.bininds_to_quats(self.codes_pred_eval[2], self.quat_medoids))\n \n self.codes_pred_eval['scale'] # Probably scale b x 3\n self.codes_pred_eval['trans'] # Probably trans b x 3\n\n self.scores_pred_eval=scores_pred[pos_inds_eval, :] * 1.\n if opts.pred_class:\n self.class_pred=self.decode_class(self.class_pred)\n # pdb.set_trace()\n min_score_vis=np.minimum(0.7, np.max(scores_pred))\n # pos_inds_vis = metrics.nms(\n # np.concatenate((bboxes_pred, scores_pred), axis=1),\n # 0.3, min_score=min_score_vis)\n\n pos_inds_vis=[i for i in range(n)]\n self.codes_pred_vis=self.filter_pos(codes_pred_all, pos_inds_vis)\n self.rois_pos_vis=self.filter_pos([self.rois], pos_inds_vis)[0]\n self.codes_pred_vis['shape']=self.decode_shape(self.codes_pred_vis['shape'])\n # self.codes_pred_vis[2]=self.decode_rotation(self.codes_pred_vis[2])\n self.codes_pred_vis['quat']=self.codes_pred_eval['quat']\n\n # self.layout_pred = self.layout_model.forward(self.input_imgs_layout)\n\n def clamp_to_image(self, amodal_bboxes, img_size):\n return torch.stack([torch.clamp(amodal_bboxes[:,0], 0, img_size[1]),\n torch.clamp(amodal_bboxes[:,1], 0, img_size[0]),\n torch.clamp(amodal_bboxes[:,2], 0, img_size[1]),\n 
torch.clamp(amodal_bboxes[:,3], 0, img_size[0])], dim=1)\n\n def compute_object_presence_parameters(self, amodal_bboxes, roi_bboxes, img_size):\n ## Compute % visible in the image\n ## % Overlap with other objects not visble\n ammodal_bboxes_clip_to_image = self.clamp_to_image(amodal_bboxes, img_size)\n size_box = amodal_bboxes[:,2:4] - amodal_bboxes[:,0:2]\n area_box = size_box[:,0]*size_box[:,1]\n\n size_image_box = ammodal_bboxes_clip_to_image[:,2:4] - ammodal_bboxes_clip_to_image[:,0:2]\n area_image_box = size_image_box[:,0]*size_image_box[:,1]\n\n size_roi_box = roi_bboxes[:,2:4] - roi_bboxes[:,0:2]\n area_roi_box = size_roi_box[:,0]*size_roi_box[:,1]\n\n return area_image_box/(area_box + 1), area_roi_box/(area_image_box + 1)\n\n def evaluate(self):\n # rois as numpy array\n # Get Predictions.\n # pdb.set_trace()\n opts = self.opts\n shapes = self.codes_pred_eval['shape'] \n scales = self.codes_pred_eval['scale']\n rots = self.codes_pred_eval['quat']\n trans = self.codes_pred_eval['trans']\n rots_before = self.codes_pred_quat_before\n trans=trans\n scores=self.scores_pred_eval\n boxes=self.rois_pos_eval.cpu().data.numpy()[:, 1:]\n # Get Ground Truth.\n # pdb.set_trace()\n gt_shapes = self.codes_gt['shape']\n gt_scales = self.codes_gt['scale']\n gt_rots = self.codes_gt['quat']\n gt_trans = self.codes_gt['trans']\n\n\n gt_boxes=self.rois.cpu().data.numpy()[:, 1:]\n iou_box=bbox_utils.bbox_overlaps(boxes.astype(np.float), gt_boxes.astype(np.float))\n trans_, gt_trans_=trans.cpu().data.numpy(), gt_trans.cpu().data.numpy()\n err_trans=np.linalg.norm(np.expand_dims(trans_, 1) - np.expand_dims(gt_trans_, 0), axis=2)\n err_pwd=np.zeros([len(err_trans)])\n\n\n err_rel_quat = (0*err_pwd).tolist()\n acc_rel = (0*err_pwd).tolist()\n # object_presence, object_visibility = self.compute_object_presence_parameters(self.amodal_bboxes, self.rois[:, 1:].data.cpu(),\n # [opts.img_height_fine, opts.img_width_fine])\n\n n_objects=len(gt_rots)\n\n acc_rel_dir_conditions = []\n acc_rel_dir = []\n err_rel_dir = []\n\n if opts.pred_relative:\n indices=[i + i * n_objects for i in range(n_objects)]\n\n if self.opts.classify_dir:\n relative_direction_predictions_classes = self.get_class_indices(self.relative_direction_prediction)\n entropy = -1*(self.relative_direction_prediction * self.relative_direction_prediction.exp()).sum(1).data.cpu().numpy()\n relative_direction_prediction = suncg_parse.bininds_to_directions(relative_direction_predictions_classes, self.direction_medoids)\n else:\n relative_direction_prediction = self.relative_direction_prediction.data.cpu()\n\n # pdb.set_trace()\n # pdb.set_trace()\n for i, (pred_dir, gt_dirs) in enumerate(zip(relative_direction_prediction, self.relative_direction_rotation)):\n # for i, (pred_dir, gt_dirs) in enumerate(zip(self.relative_direction_rotation, self.relative_direction_rotation)):\n if i in indices:\n continue\n\n src_i = i //n_objects\n src_c = self.index2object_class[self.object_classes.data[src_i, 0]]\n # if src_c != 'desk':\n # continue\n\n min_err = 180\n for gt_dir in gt_dirs:\n min_err = min(min_err, metrics.direction_dist(pred_dir.numpy(), gt_dir.numpy()))\n err_rel_dir.append(min_err)\n state = -1\n err_angle_iter = iter(err_rel_dir)\n if opts.classify_dir:\n for i, (pred, gt_bins, pred_dir, gt_dirs) in enumerate(zip(relative_direction_predictions_classes, self.relative_direction_rotation_binned,\n relative_direction_prediction, self.relative_direction_rotation)):\n if i in indices:\n continue\n if pred in gt_bins.data.cpu():\n acc_rel_dir.append(1)\n 
state = 1\n else:\n acc_rel_dir.append(0)\n state = 0\n src_i = i//n_objects\n trj_i = i % n_objects\n src_c = self.index2object_class[self.object_classes.data[src_i, 0]]\n trj_c = self.index2object_class[self.object_classes.data[trj_i, 0]]\n t = (\"{}\".format(self.image_names[0]), src_i, trj_i, pred, gt_bins.data.cpu().numpy(),\n np.linalg.norm(gt_trans_[src_i]- gt_trans_[trj_i]), state, \n next(err_angle_iter),\n src_c, trj_c,\n 0, 0,\n 0, 0, entropy[i])\n acc_rel_dir_conditions.append(t)\n else:\n acc_rel_dir.append(0)\n\n\n scales_, gt_scales_=scales.cpu().data.numpy(), gt_scales.cpu().data.numpy()\n err_scales=np.mean(np.abs(np.expand_dims(np.log(scales_), 1) - np.expand_dims(np.log(gt_scales_), 0)), axis=2)\n err_scales /= np.log(2.0)\n\n gt_quats = [t.data.cpu() for t in self.codes_gt_quats]\n\n\n ndt, ngt=err_scales.shape\n err_shapes=err_scales * 0.\n err_rots=err_scales * 0.\n err_rots_before = err_scales * 0\n # pdb.set_trace()\n for i in range(ndt):\n for j in range(ngt):\n err_shapes[i, j]=metrics.volume_iou(shapes[i, 0].data, gt_shapes[\n j, 0].data, thresh=self.opts.voxel_eval_thresh)\n if len(rots[i]) == 4:\n # err_rots[i, j]=metrics.quat_dist(rots[i].data.cpu(), gt_rots[j].data.cpu())\n q_errs = []\n for quat in gt_quats[j]:\n q_errs.append(metrics.quat_dist(rots[i].data.cpu(), quat))\n err_rots[i, j] = min(q_errs)\n else:\n m1 = metrics.quat_dist(rots[i][0].data.cpu(), gt_rots[j].data.cpu())\n m2 = metrics.quat_dist(rots[i][1].data.cpu(), gt_rots[j].data.cpu())\n err_rots[i, j] = min(m1, m2)\n\n for i in range(ndt):\n for j in range(ngt):\n err_shapes[i, j]=metrics.volume_iou(shapes[i, 0].data, gt_shapes[\n j, 0].data, thresh=self.opts.voxel_eval_thresh)\n if len(rots_before[i]) == 4:\n # err_rots[i, j]=metrics.quat_dist(rots[i].data.cpu(), gt_rots[j].data.cpu())\n q_errs = []\n for quat in gt_quats[j]:\n q_errs.append(metrics.quat_dist(rots_before[i].data.cpu(), quat))\n err_rots_before[i, j] = min(q_errs)\n else:\n m1 = metrics.quat_dist(rots_before[i][0].data.cpu(), gt_rots[j].data.cpu())\n m2 = metrics.quat_dist(rots_before[i][1].data.cpu(), gt_rots[j].data.cpu())\n err_rots_before[i, j] = min(m1, m2)\n\n err_rots=np.diag(err_rots).tolist()\n acc_rots = [1 if err < 30 else 0 for err in err_rots]\n err_rots_before = np.diag(err_rots_before).tolist()\n acc_rots_before = [1 if err < 30 else 0 for err in err_rots_before]\n err_trans=np.diag(err_trans).tolist()\n err_scales=np.diag(err_scales).tolist()\n err_pwd=err_pwd.tolist()\n err_shapes = np.diag(err_shapes).tolist()\n\n image_name = \"{}\".format(self.image_names[0])\n absolute_rot_conditions = []\n # pdb.set_trace()\n if self.opts.var_gmm_rot:\n self.codes_quat_var = np.sqrt(np.exp(self.codes_quat_var))*180/np.pi\n else:\n self.codes_quat_var = np.zeros([len(err_rots), self.opts.nz_rot])\n for ox in range(len(err_rots)):\n bf_class = suncg_parse.quats_to_bininds(rots_before[ox].data.unsqueeze(0), self.quat_medoids)[0]\n af_class = suncg_parse.quats_to_bininds(rots[ox].data.unsqueeze(0), self.quat_medoids)[0]\n t = (image_name, ox, self.index2object_class[self.object_classes.data[ox, 0]], err_rots_before[ox], err_rots[ox],\n self.entropy_before_optim[ox], self.entropy_after_optim[ox], bf_class, af_class,\n metrics.quat_dist(rots_before[ox].data, rots[ox].data), self.codes_quat_var[ox][bf_class])\n # pdb.set_trace()\n absolute_rot_conditions.append(t)\n\n\n # absolute_rot_conditions = [(err_b, err, entp_be, entp_af, metrics.quat_dist(rot_b.data, rot_af.data)) for err_b, err, entp_be, entp_af, rot_b, rot_af in 
zip(err_rots_before,\n # err_rots, self.entropy_before_optim, self.entropy_after_optim, rots_before, rots)]\n\n # for i in range(len(err_rots_before)):\n # if self.entropy_before_optim[i] < self.entropy_after_optim[i] - 1.0:\n # # pdb.set_trace()\n # err_rots[i] = err_rots_before[i]\n\n\n\n stats={'trans': err_trans, 'scales': err_scales,'shape': err_shapes, 'rot': err_rots, 'rot_b' : err_rots_before, 'acc_rots' : acc_rots, 'acc_rots_bef' : acc_rots_before,\n 'pwd': err_pwd, 'trans_updates': self.update_norm, 'acc_rot' : acc_rots, 'acc_rot_before' : acc_rots_before,\n # 'pwr': err_rel_quat, 'acc_rel_quat' : acc_rel_quat, \n 'acc_rel_dir' : acc_rel_dir, 'rel_dir': err_rel_dir\n }\n # print(stats)\n # pdb.set_trace()\n\n if opts.pred_class:\n correct=torch.sum(self.class_pred == self.object_classes.squeeze(1).data.cpu())\n total=len(self.class_pred)\n stats['correct']=correct\n stats['total']=total\n else:\n stats['correct']=0\n stats['total']=0\n \n if len(err_trans) == 1:\n stats = {}\n return stats, acc_rel_dir_conditions, absolute_rot_conditions\n\n def save_current_visuals(self, image_name):\n imgs_dir=osp.join(self.opts.results_quality_dir, '{}'.format(image_name))\n img_file = osp.join(imgs_dir, 'c_gt_objects_cam_view.png')\n if osp.exists(imgs_dir) and osp.exists(img_file) and False:\n return\n else:\n visuals=self.get_current_visuals()\n if not os.path.exists(imgs_dir) :\n os.makedirs(imgs_dir)\n for k in visuals:\n img_path=osp.join(imgs_dir, k + '.png')\n scipy.misc.imsave(img_path, visuals[k])\n\n def save_current_stats(self, bench, image_name):\n imgs_dir=osp.join(self.opts.results_quality_dir, '{}'.format(image_name))\n json_file=os.path.join(imgs_dir, 'bench_iter_{}.json'.format(0))\n # print(json_file)\n # if house_name == 'd49bb0b4b52cceffbe6086dfa1976e51':\n # pdb.set_trace()\n with open(json_file, 'w') as f:\n json.dump({'bench': bench}, f)\n\n def test_draw(self):\n opts=self.opts\n image_names=[]\n index_filename=opts.index_file\n if index_filename is not None:\n with open(index_filename) as f:\n for line in f:\n line=line.strip()\n image_names.append(line)\n\n # pdb.set_trace()\n # read the files for which you want to create visualizations?\n indices = []\n if len(image_names) == 0:\n rng_state = np.random.RandomState(0)\n indices = rng_state.choice(len(self.dataloader), 100)\n\n for i, batch in enumerate(self.dataloader):\n self.set_input(batch)\n self.vis_iter=i\n # print(i)\n if self.invalid_batch:\n continue\n image_name=batch['image_name'][0]\n example_id='{}'.format(image_name)\n if example_id in image_names or (len(image_names) == 0 and i in indices) :\n self.predict()\n bench_image_stats,_,_=self.evaluate()\n self.save_current_visuals(image_name)\n self.save_current_stats(bench_image_stats, image_name)\n print(\"Generating {}\".format(i))\n\n\n\n def test(self):\n # Choose 30 random examples and save it.\n opts=self.opts\n if not opts.preload_stats:\n invalid_rois=0\n bench_stats=[]\n acc_rel_conditions_all = []\n head = ['house_name_view_id', 'src_index', 'trj_index', 'PC', 'GT', 'D', 'Acc', 'Err', 'SC', 'TC', 'VS', 'VT', 'PS', 'PT', 'Entropy', 'Var']\n acc_rel_conditions_all.append(head)\n acc_rel_dir_conditions_all = [head]\n absolute_rot_conditions_all = [['house_name_view_id', 'obj_index', 'obj_class', 'err_before', 'err', 'entropy_before', 'entropy_after', 'rot_b', 'rot_af', 'diff_bw_bf_af']]\n\n # codes are (shapes, scales, quats, trans)\n n_iter=len(self.dataloader)\n for i, batch in enumerate(self.dataloader):\n if i % 100 == 0:\n print('{}/{} 
evaluation iterations.'.format(i, n_iter))\n                if opts.max_eval_iter > 0 and (i >= opts.max_eval_iter):\n                    break\n                self.set_input(batch)\n                if not self.invalid_batch:\n                    self.predict()\n                    # pdb.set_trace()\n                    image_name = batch['image_name'][0]\n                    bench_image_stats, acc_rel_dir_conditions, absolute_rot_conditions = self.evaluate()\n                    acc_rel_dir_conditions_all.extend(acc_rel_dir_conditions)\n                    absolute_rot_conditions_all.extend(absolute_rot_conditions)\n                    json_file = osp.join(opts.results_eval_dir, 'eval_result_{}.json'.format(image_name))\n                    bench_image_stats['image_name'] = batch['image_name'][0]\n                    with open(json_file, 'w') as f:\n                        json.dump({'bench': bench_image_stats}, f)\n\n                    bench_stats.append(bench_image_stats)\n\n                    # if opts.save_visuals and (i % opts.visuals_freq == 0):\n                    #     self.save_current_visuals()\n                else:\n                    if self.invalid_rois is not None:\n                        print(\"Total rois {}\".format(self.invalid_rois.numel() / 5))\n                        invalid_rois += 1\n                # if i > 10:\n                #     break\n\n            print(\"% of RoI invalid {}\".format(invalid_rois * 100.0 / n_iter))\n\n            # Accumulate stats and print\n            acc_stats = {'trans': [], 'scales': [], 'shape': [], 'rot_b': [], 'rot': [], 'trans_updates': [],\n                         # 'pwr': [], 'acc_rel_quat' : [],\n                         'acc_rel_dir': [], 'rel_dir': [], 'acc_rot': [], 'acc_rot_before': []}\n            class_stats = {'correct': [], 'total': []}\n            for bench in bench_stats:\n                for key in acc_stats.keys():\n                    if key in bench:\n                        acc_stats[key].extend(bench[key])\n                for key in class_stats.keys():\n                    if key in bench:\n                        class_stats[key].append(bench[key])\n\n            # acc_threshold = {'shape' : 0.25 , 'trans' : 1, 'rot_b' : 30, 'rot' : 30, 'scales':0.5}\n            acc_threshold = {'shape': 0.25, 'trans': 0.5, 'rot_b': 30, 'rot': 30, 'scales': 0.2}\n            for key, thres in acc_threshold.items():\n                acc_stats[\"{}_acc\".format(key)] = [1 if v < thres else 0 for v in acc_stats[key]]\n\n            json_file = os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.json'.format(opts.id, opts.eval_set, 0))\n\n            print('Writing results to file: {:s}'.format(json_file))\n            with open(json_file, 'w') as f:\n                json.dump(acc_stats, f)\n        else:\n            json_file = os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.json'.format(opts.id, opts.eval_set, 0))\n            with open(json_file) as f:\n                acc_stats = json.load(f)\n\n        # Print mean error and median error.\n        # (Named agg_fns so it does not shadow the imported `metrics` module.)\n        agg_fns = {'mean': np.mean, 'median': np.median}\n        criterias = {'trans', 'scales', 'rot', 'rot_b', 'trans_updates', 'shape',\n                     # 'pwr', 'acc_rel_quat',\n                     'acc_rel_dir', 'rel_dir', 'acc_rot', 'acc_rot_before',\n                     'trans_acc', 'rot_b_acc', 'rot_acc', 'scales_acc', 'shape_acc'}\n\n        for key in criterias:\n            for mkey in agg_fns.keys():\n                print('{} {} : {:0.3f}'.format(mkey, key, agg_fns[mkey](np.array(acc_stats[key]))))\n\n        for key in acc_stats.keys():\n            acc_stats[key] = np.array(acc_stats[key])\n\n        # keys=['trans', 'scales', 'rot', 'pwd', 'trans_updates', 'pwr']\n        key_clip = {'shape': 1.0, 'trans': 3.0, 'pwd': 5.0, 'scales': 1.5, 'rot_b': 180, 'rot': 180, 'trans_updates': 4, 'pwr': 180, 'rel_dir': 180}\n        for key in criterias:\n            err = acc_stats[key]\n            if 'acc' in key:\n                clip_max = 2\n                continue\n            else:\n                clip_max = key_clip[key]\n            values, base = np.histogram(np.clip(np.array(err), 0, clip_max), 40)\n            cumulative = np.cumsum(values)\n            cumulative = cumulative / len(err)\n            plt.plot(cumulative, base[:-1], c='blue')\n            plt.plot([0.0, 1.0], [np.mean(err), np.mean(err)], c='red')\n            plt.title('Error {} vs data-fraction'.format(key))\n            plt.savefig(os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.png'.format(opts.id, opts.eval_set, 
key)))\n plt.close()\n\n with open(os.path.join(FLAGS.results_eval_dir, 'eval_set_{}_{}_{}.pkl'.format(opts.id, opts.eval_set, key)) , 'wb') as f:\n pickle.dump({'err' : acc_stats[key], 'freq_values' : cumulative, 'bin_values': base[:-1]}, f)\n\n\n if self.opts.pred_class:\n correct=sum(class_stats['correct'])\n total=sum(class_stats['total'])\n print('{}, {}/{} {}'.format('class accuracy', correct, total, correct * 1.0 / total))\n\n\n if opts.log_csv is not None:\n with open(\"{}_rel_dir.csv\".format(opts.log_csv), 'w') as f:\n for acc_cond in acc_rel_dir_conditions_all:\n for val in list(acc_cond):\n if type(val) == np.ndarray:\n t = list(set([str(x) for x in val]))\n t.sort()\n f.write('{},'.format(';'.join(t)))\n else:\n if type(val) == str:\n f.write('{},'.format(val))\n else:\n f.write('{},'.format(str(np.round(val, 3))))\n f.write('\\n')\n\n if opts.log_csv is not None:\n with open(\"{}_abs_rot.csv\".format(opts.log_csv), 'w') as f:\n for entropy_cond in absolute_rot_conditions_all:\n for val in list(entropy_cond):\n if type(val) == np.ndarray:\n t = list(set([str(x) for x in val]))\n t.sort()\n f.write('{},'.format(';'.join(t)))\n else:\n if type(val) == str:\n f.write('{},'.format(val))\n else:\n f.write('{},'.format(str(np.round(val, 3))))\n f.write('\\n')\n\n\ndef main(_):\n FLAGS.suncg_dl_out_codes=True\n FLAGS.suncg_dl_out_fine_img=True\n FLAGS.suncg_dl_out_test_proposals=False\n FLAGS.suncg_dl_out_voxels=False\n FLAGS.suncg_dl_out_layout=False\n FLAGS.suncg_dl_out_depth=False\n # FLAGS.n_data_workers=4\n FLAGS.max_views_per_house=2\n \n\n FLAGS.batch_size=1\n assert(FLAGS.batch_size == 1)\n\n if FLAGS.results_name is None:\n FLAGS.results_name=FLAGS.name\n\n FLAGS.results_vis_dir=osp.join(FLAGS.results_vis_dir, 'box3d_base', FLAGS.eval_set, FLAGS.results_name)\n FLAGS.results_quality_dir=osp.join(FLAGS.results_quality_dir, 'box3d_base', FLAGS.eval_set, FLAGS.results_name)\n FLAGS.results_eval_dir=osp.join(FLAGS.results_eval_dir, 'box3d_base', FLAGS.eval_set, FLAGS.results_name)\n FLAGS.rendering_dir = osp.join(FLAGS.rendering_dir, FLAGS.results_name)\n if not os.path.exists(FLAGS.results_eval_dir):\n os.makedirs(FLAGS.results_eval_dir)\n if not os.path.exists(FLAGS.results_vis_dir):\n os.makedirs(FLAGS.results_vis_dir)\n\n torch.manual_seed(0)\n np.random.seed(0)\n random.seed(0)\n\n if not FLAGS.classify_rot:\n FLAGS.nz_rot=4\n\n\n if not FLAGS.classify_dir:\n FLAGS.nz_rel_dir=3\n\n tester=DWRTester(FLAGS)\n tester.init_testing()\n if not FLAGS.draw_vis:\n tester.test()\n else:\n tester.test_draw()\n\n # pred_clases = torch.cat(tester.stored_quat_relative_pred_classes).numpy()\n # gt_clases = torch.cat(tester.stored_quat_relative_gt_classes).numpy()\n\n # with open(osp.join(FLAGS.results_eval_dir, 'pred_relative_classes.npy'),'w') as f:\n # np.save(f, pred_clases)\n # with open(osp.join(FLAGS.results_eval_dir, 'gt_relative_classes.npy'),'w') as f:\n # np.save(f, gt_clases)\n # # pdb.set_trace()\n\n\nif __name__ == '__main__':\n app.run(main)\n",
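The update_locations step above refines per-object translations by linear least squares: each predicted pairwise offset adds a row enforcing x_j - x_i = rel_ij, and each absolute prediction adds a lmbda-weighted anchor row. A standalone sketch of the same system (not from the original file; toy inputs are hypothetical, and the distance gating is omitted):

import numpy as np

def refine_positions(absolute, relative, lmbda=1.0):
    # absolute: (n, 3) predicted translations; relative: (n*n, 3) offsets,
    # where row i*n + j encodes position[j] - position[i].
    n = absolute.shape[0]
    rows_a, rows_b = [], []
    for i in range(n):
        for j in range(n):
            if i != j:
                row = np.zeros(n)
                row[i], row[j] = -1.0, 1.0
                rows_a.append(row)
                rows_b.append(relative[i * n + j])
    for i in range(n):
        # Anchor rows keep the solution close to the absolute predictions.
        row = np.zeros(n)
        row[i] = lmbda
        rows_a.append(row)
        rows_b.append(lmbda * absolute[i])
    return np.linalg.lstsq(np.stack(rows_a), np.stack(rows_b), rcond=None)[0]

# Toy check: a consistent 1m relative offset pulls the noisy absolutes together.
abs_pred = np.array([[0., 0., 0.], [1.2, 0., 0.]])
rel_pred = np.array([[0., 0., 0.], [1., 0., 0.], [-1., 0., 0.], [0., 0., 0.]])
print(refine_positions(abs_pred, rel_pred))  # x-coords move to approx 0.08 and 1.12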
"from __future__ import division\nfrom __future__ import print_function\nimport copy\nimport csv\nimport json\nimport numpy as np\nimport scipy.linalg\nimport scipy.io as sio\nimport os\nimport os.path as osp\nimport cPickle as pickle\nimport cPickle as pkl\nimport torch\nfrom torch.autograd import Variable\nfrom . import transformations\nfrom . import bbox_utils\nimport pdb\nfrom collections import defaultdict\n\ncurr_path = osp.dirname(osp.abspath(__file__))\nsymmetry_file = osp.join(curr_path, '../cachedir/symmetry_nyu.pkl')\n\nvalid_object_classes = [\n 'bed', 'sofa', 'table', 'chair', 'desk', 'television',\n]\n\nobject_class2index = {'bed' : 1, 'sofa' :2, 'table' :3, \n 'chair':4 , 'desk':5, 'television':6,\n}\n\ndef get_split(save_dir, image_names=None, train_ratio=0.7, val_ratio=0.28):\n ''' Loads saved splits if they exist. Otherwise creates a new split.\n\n Args:\n save_dir: Absolute path of where splits should be saved\n '''\n split_file = osp.join(save_dir, 'nyu_split.pkl')\n if os.path.isfile(split_file):\n return pickle.load(open(split_file, 'rb'))\n else:\n list.sort(image_names)\n image_names = np.ndarray.tolist(np.random.permutation(image_names))\n n_images = len(image_names)\n n_train = int(n_images * train_ratio)\n n_val = int(n_images * val_ratio)\n splits = {\n 'train': image_names[0:n_train],\n 'val': image_names[n_train:(n_train + n_val)],\n 'test': image_names[(n_train + n_val):]\n }\n pickle.dump(splits, open(split_file, 'wb'))\n return splits\n\ndef scale_transform(s):\n return np.diag([s, s, s, 1])\n\n\ndef trans_transform(t):\n t = t.reshape(3)\n tform = np.eye(4)\n tform[0:3, 3] = t\n return tform\n\ndef invertTransformation(transform):\n invertedTransform = np.eye(4)\n invertedTransform[0:3, 0:3] = transform[0:3, 0:3].transpose()\n tinv = -1*np.matmul(transform[0:3, 0:3].transpose(), transform[0:3, 3:])\n invertedTransform[0:3, 3] = tinv.squeeze()\n return invertedTransform.copy()\n\n\ndef euler_to_rotation_matrix(theta, phi, psi):\n '''\n theta, phi, and psi are in degrees. 
\n theta is rotation about x-axis\n phi is rotation about y-axis\n psi is rotation about z-axis\n '''\n rotx = np.array([[1, 0, 0, 0],\n [0, np.cos(theta), -np.sin(theta), 0],\n [0, np.sin(theta), np.cos(theta), 0],\n [0, 0, 0, 1]])\n\n roty = np.array([[np.cos(phi), 0, np.sin(phi), 0],\n [0, 1, 0 , 0],\n [-np.sin(phi), 0, np.cos(phi), 0],\n [0, 0, 0, 1]])\n \n rotz = np.array([[np.cos(psi), -np.sin(psi), 0, 0],\n [np.sin(psi), np.cos(psi), 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n rot = np.matmul(np.matmul(rotx, roty), rotz)\n return rot\n\ndef get_sym_angles_from_axis(sym_type = ''):\n syms = [(0, 0, 0)]\n if sym_type == 'xz':\n syms.append((0, np.pi, 0))\n elif sym_type == 'xy':\n syms.append((0, 0, np.pi))\n elif sym_type == 'yz':\n syms.append((np.pi, 0, 0))\n elif sym_type == 'xyz':\n syms.append((np.pi, 0, 0))\n syms.append((0, np.pi, 0))\n syms.append((0, 0, np.pi))\n return syms\n\n\ndef codify_room_data(object_data, obj_loader, use_shape_code=False, max_object_classes=10):\n n_objects = len(object_data)\n codes = []\n bboxes = []\n for nx in range(n_objects):\n model_name = object_data[nx]['basemodel'].replace(\".mat\", \"\")\n if (use_shape_code):\n shape = obj_loader.lookup(model_name, 'code')\n else:\n shape = obj_loader.lookup(model_name, 'voxels')\n vox_shift = trans_transform(np.ones((3, 1)) * 0.5)\n vox2obj = np.matmul(\n np.matmul(\n trans_transform(obj_loader.lookup(model_name, 'translation')),\n scale_transform(obj_loader.lookup(model_name, 'scale')[0, 0])\n ), vox_shift)\n\n syms = get_sym_angles_from_axis(obj_loader.lookup(model_name, 'sym'))\n quat_vals = []\n cams2vox_object = []\n vox2cams_object = []\n for sym_angles in syms:\n theta, phi, psi = sym_angles\n sym_rot = euler_to_rotation_matrix(theta, phi, psi)\n scale = np.array(object_data[nx]['scale'], copy=True, dtype=np.float32)\n trans = np.array(object_data[nx]['trans'], copy=True, dtype=np.float32)\n rot_val = np.array(object_data[nx]['pose_full'], copy=True, dtype=np.float32)\n rot_val = np.pad(rot_val, (0, 1), 'constant')\n rot_val[3,3] = 1\n obj2cam = np.matmul(trans_transform(trans) , np.array(rot_val, copy=True))\n obj2cam = np.matmul(obj2cam, sym_rot)\n vox2cam = np.matmul(obj2cam, vox2obj)\n trans = np.array(vox2cam[0:3, 3], copy=True, dtype=np.float32)\n vox2cams_object.append(vox2cam)\n cams2vox_object.append(invertTransformation(vox2cam))\n quat_val = transformations.quaternion_from_matrix(rot_val, isprecise=True)\n quat_vals.append(quat_val)\n\n bbox = np.array(object_data[nx]['bbox'], copy=True, dtype=np.float32)\n quat_val = np.stack(quat_vals).copy()\n transform_cam2vox = np.stack(cams2vox_object).astype(np.float32)\n transform_vox2cam = np.stack(vox2cams_object).astype(np.float32)\n amodal_bbox = np.zeros((8,3))\n \n code = {'shape' : shape,\n 'vox2cam' : vox2cam,\n 'scale' : scale,\n 'quat' : quat_val,\n 'trans' : trans,\n 'class' : np.array([object_class2index[object_data[nx]['cls']]]),\n 'transform_cam2vox' : transform_cam2vox,\n 'transform_vox2cam' : transform_vox2cam,\n 'amodal_bbox' : amodal_bbox,\n }\n codes.append(code)\n bboxes.append(bbox)\n\n if len(bboxes) > 0:\n bboxes = np.vstack(bboxes)\n\n return codes, bboxes\n\ndef select_ids(object_data, metaloader=None, min_pixels=0):\n n_objects = len(object_data)\n objects = []\n for nx in range(n_objects):\n obj = object_data[nx]\n if obj['cls'] in valid_object_classes:\n objects.append(obj)\n\n return objects\n\nclass ObjectLoader:\n '''Pre-loading class to facilitate object lookup'''\n\n def __init__(self, object_dir):\n 
self._object_dir = object_dir\n object_names = [f.replace(\".mat\",\"\") for f in os.listdir(object_dir) if \"mat\" in f]\n list.sort(object_names)\n self._object_names = object_names\n self._curr_obj_id = None\n self._preloaded_data = {}\n self._predloaded_syms = defaultdict(str)\n\n def lookup(self, obj_id, field):\n if obj_id != self._curr_obj_id:\n self._curr_obj_id = obj_id\n if obj_id in self._preloaded_data.keys():\n self._curr_obj_data = self._preloaded_data[obj_id]\n else:\n self._curr_obj_data = sio.loadmat(osp.join(\n self._object_dir, obj_id + '.mat'))\n return copy.copy(self._curr_obj_data[field])\n\n def preload(self):\n with open('symmetry_nyu.pkl') as f:\n preloaded_syms = pickle.load(f)\n for ox in range(len(self._object_names)):\n obj_id = self._object_names[ox]\n obj_data = sio.loadmat(osp.join(\n self._object_dir, obj_id + '.mat'))\n obj_data_new = {}\n obj_data_new['scale'] = obj_data['scale'].copy()\n obj_data_new['translation'] = obj_data['translation'].copy()\n obj_data_new['voxels'] = obj_data['voxels'].copy()\n # for k in obj_data.keys():\n # obj_data_new[k] = obj_data[k].clone()\n self._preloaded_data[obj_id] = obj_data_new\n if obj_id in preloaded_syms.keys():\n self._preloaded_data[obj_id]['sym'] = preloaded_syms[obj_id]\n else:\n self._preloaded_data[obj_id]['sym'] = ''\n return\n\n\n# class ObjectLoader:\n# def __init__(self, object_dir):\n# self._object_dir = object_dir\n# object_names = [f.replace('.mat','') for f in os.listdir(object_dir)]\n# list.sort(object_names)\n# self._object_names = object_names\n# self._curr_obj_id = None\n# self._preloaded_data = {}\n# self._predloaded_syms = defaultdict(str)\n\n# def lookup(self, obj_id, field):\n# if obj_id != self._curr_obj_id:\n# self._curr_obj_id = obj_id\n# if obj_id in self._preloaded_data.keys():\n# self._curr_obj_data = self._preloaded_data[obj_id]\n# else:\n# self._curr_obj_data = sio.loadmat(osp.join(\n# self._object_dir, obj_id + '.mat'))\n# return copy.copy(self._curr_obj_data[field])\n\n# def preload(self):\n# # with open('symmetry2.pkl') as f:\n# # preloaded_syms = pickle.load(f)\n \n# for ox in range(len(self._object_names)):\n# obj_id = self._object_names[ox]\n# obj_data = sio.loadmat(osp.join(\n# self._object_dir, obj_id + '.mat'))\n# obj_data_new = {}\n# obj_data_new['surfaces'] = obj_data['surfaces'].copy()\n# obj_data_new['comp'] = obj_data['comp'].copy()\n# self._preloaded_data[obj_id] = obj_data_new\n# # if obj_id in preloaded_syms.keys():\n# # self._preloaded_data[obj_id]['sym'] = preloaded_syms[obj_id]\n# # else:\n# # self._preloaded_data[obj_id]['sym'] = ''\n# return\n\n\nclass MetaLoader:\n def __init__(self, ):\n return\n def lookup(self, obj_id, field='nyuv2_40class'):\n obj_class =obj_id.split('_')[0]\n return obj_class\n\n",
"import bpy\nimport math\nimport numpy as np\n\ndef get_calibration_matrix_K_from_blender(camd):\n f_in_mm = camd.lens\n scene = bpy.context.scene\n resolution_x_in_px = scene.render.resolution_x\n resolution_y_in_px = scene.render.resolution_y\n scale = scene.render.resolution_percentage / 100\n sensor_width_in_mm = camd.sensor_width\n sensor_height_in_mm = camd.sensor_height\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n if (camd.sensor_fit == 'VERTICAL'):\n # the sensor height is fixed (sensor fit is horizontal), \n # the sensor width is effectively changed with the pixel aspect ratio\n s_u = resolution_x_in_px * scale / sensor_width_in_mm / pixel_aspect_ratio \n s_v = resolution_y_in_px * scale / sensor_height_in_mm\n else: # 'HORIZONTAL' and 'AUTO'\n # the sensor width is fixed (sensor fit is horizontal), \n # the sensor height is effectively changed with the pixel aspect ratio\n pixel_aspect_ratio = scene.render.pixel_aspect_x / scene.render.pixel_aspect_y\n s_u = resolution_x_in_px * scale / sensor_width_in_mm\n s_v = resolution_y_in_px * scale * pixel_aspect_ratio / sensor_height_in_mm\n\n # Parameters of intrinsic calibration matrix K\n alpha_u = f_in_mm * s_u\n alpha_v = f_in_mm * s_v\n u_0 = resolution_x_in_px*scale / 2\n v_0 = resolution_y_in_px*scale / 2\n skew = 0 # only use rectangular pixels\n\n K = np.array(\n [[alpha_u, skew, u_0],\n [ 0 , alpha_v, v_0],\n [ 0 , 0, 1 ]])\n return K\n\ndef camPosToQuaternion(cx, cy, cz):\n camDist = math.sqrt(cx * cx + cy * cy + cz * cz)\n cx = cx / camDist\n cy = cy / camDist\n cz = cz / camDist\n axis = (-cz, 0, cx)\n angle = math.acos(cy)\n a = math.sqrt(2) / 2\n b = math.sqrt(2) / 2\n w1 = axis[0]\n w2 = axis[1]\n w3 = axis[2]\n c = math.cos(angle / 2)\n d = math.sin(angle / 2)\n q1 = a * c - b * d * w1\n q2 = b * c + a * d * w1\n q3 = a * d * w2 + b * d * w3\n q4 = -b * d * w2 + a * d * w3\n return (q1, q2, q3, q4)\n\ndef quaternionFromYawPitchRoll(yaw, pitch, roll):\n c1 = math.cos(yaw / 2.0)\n c2 = math.cos(pitch / 2.0)\n c3 = math.cos(roll / 2.0) \n s1 = math.sin(yaw / 2.0)\n s2 = math.sin(pitch / 2.0)\n s3 = math.sin(roll / 2.0) \n q1 = c1 * c2 * c3 + s1 * s2 * s3\n q2 = c1 * c2 * s3 - s1 * s2 * c3\n q3 = c1 * s2 * c3 + s1 * c2 * s3\n q4 = s1 * c2 * c3 - c1 * s2 * s3\n return (q1, q2, q3, q4)\n\n\ndef camPosToQuaternion(cx, cy, cz):\n q1a = 0\n q1b = 0\n q1c = math.sqrt(2) / 2\n q1d = math.sqrt(2) / 2\n camDist = math.sqrt(cx * cx + cy * cy + cz * cz)\n cx = cx / camDist\n cy = cy / camDist\n cz = cz / camDist \n t = math.sqrt(cx * cx + cy * cy) \n tx = cx / t\n ty = cy / t\n yaw = math.acos(ty)\n if tx > 0:\n yaw = 2 * math.pi - yaw\n pitch = 0\n tmp = min(max(tx*cx + ty*cy, -1),1)\n #roll = math.acos(tx * cx + ty * cy)\n roll = math.acos(tmp)\n if cz < 0:\n roll = -roll \n # print(\"%f %f %f\" % (yaw, pitch, roll))\n q2a, q2b, q2c, q2d = quaternionFromYawPitchRoll(yaw, pitch, roll) \n q1 = q1a * q2a - q1b * q2b - q1c * q2c - q1d * q2d\n q2 = q1b * q2a + q1a * q2b + q1d * q2c - q1c * q2d\n q3 = q1c * q2a - q1d * q2b + q1a * q2c + q1b * q2d\n q4 = q1d * q2a + q1c * q2b - q1b * q2c + q1a * q2d\n return (q1, q2, q3, q4)\n\ndef camRotQuaternion(cx, cy, cz, theta): \n theta = theta / 180.0 * math.pi\n camDist = math.sqrt(cx * cx + cy * cy + cz * cz)\n cx = -cx / camDist\n cy = -cy / camDist\n cz = -cz / camDist\n q1 = math.cos(theta * 0.5)\n q2 = -cx * math.sin(theta * 0.5)\n q3 = -cy * math.sin(theta * 0.5)\n q4 = -cz * math.sin(theta * 0.5)\n return (q1, q2, q3, q4)\n\ndef quaternionProduct(qx, 
qy): \n a = qx[0]\n b = qx[1]\n c = qx[2]\n d = qx[3]\n e = qy[0]\n f = qy[1]\n g = qy[2]\n h = qy[3]\n q1 = a * e - b * f - c * g - d * h\n q2 = a * f + b * e + c * h - d * g\n q3 = a * g - b * h + c * e + d * f\n q4 = a * h + b * g - c * f + d * e \n return (q1, q2, q3, q4)\n\ndef obj_centened_camera_pos(dist, azimuth_deg, elevation_deg):\n phi = float(elevation_deg) / 180 * math.pi\n theta = float(azimuth_deg) / 180 * math.pi\n x = (dist * math.cos(theta) * math.cos(phi))\n y = (dist * math.sin(theta) * math.cos(phi))\n z = (dist * math.sin(phi))\n return (x, y, z)\n\n\n"
] | [
[
"numpy.hstack",
"numpy.divide",
"numpy.cumsum",
"numpy.ones",
"numpy.where",
"numpy.argsort",
"numpy.zeros",
"numpy.sum",
"numpy.vstack"
],
[
"numpy.diag",
"torch.mean",
"numpy.expand_dims",
"numpy.minimum",
"torch.cat",
"torch.zeros",
"numpy.cumsum",
"matplotlib.pyplot.plot",
"numpy.max",
"numpy.round",
"numpy.concatenate",
"numpy.mean",
"numpy.argmin",
"numpy.exp",
"torch.autograd.Variable",
"numpy.random.randint",
"torch.norm",
"torch.ones",
"torch.from_numpy",
"matplotlib.pyplot.close",
"numpy.zeros",
"torch.index_select",
"numpy.log",
"numpy.min",
"numpy.linalg.lstsq",
"numpy.array",
"numpy.random.RandomState",
"numpy.sum",
"torch.nn.functional.normalize",
"numpy.random.seed",
"torch.nn.functional.log_softmax",
"torch.Tensor",
"matplotlib.use",
"torch.manual_seed",
"numpy.linalg.norm",
"numpy.ones",
"torch.clamp"
],
[
"numpy.diag",
"numpy.pad",
"numpy.eye",
"numpy.matmul",
"numpy.cos",
"numpy.stack",
"numpy.sin",
"numpy.ones",
"numpy.random.permutation",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
storopoli/Machine-Learning-Probalistic | [
"fb939b92a61f7d3a7e6c28d0b14f58d99a0b07f1",
"fb939b92a61f7d3a7e6c28d0b14f58d99a0b07f1",
"82cfdcb8daea653cda8f77e8737e585418476ca7",
"fb939b92a61f7d3a7e6c28d0b14f58d99a0b07f1",
"fb939b92a61f7d3a7e6c28d0b14f58d99a0b07f1"
] | [
"pyprobml-master/examples/betaCredibleInt.py",
"pyprobml-master/examples/regtreeSurfaceDemo.py",
"pyprobml-master/examples/mixGaussManifoldOpt.py",
"pyprobml-master/examples/scatter_demo2.py",
"pyprobml-master/examples/linregPolyVsN.py"
] | [
"from scipy.stats import beta\nimport numpy as np\n\nS = 47\nN = 100 \na = S+1\nb = (N-S)+1 \nalpha = 0.05;\n\nCI1 = beta.interval(1-alpha, a, b)\n\nl = beta.ppf(alpha/2, a, b)\nu = beta.ppf(1-alpha/2, a, b)\nCI2 = (l,u)\n\nsamples = beta.rvs(a, b, size=1000)\nsamples = np.sort(samples)\nCI3 = np.percentile(samples, 100*np.array([alpha/2, 1-alpha/2])) \n\nprint(CI1)\nprint(CI2)\nprint(CI3)\n",
"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import proj3d\nimport os\n\nt1 = 5\nt3 = 3\nt2 = 7\nt4 = 3\n\nr = np.linspace(2, 10, 5)\n\n#A function to return a tree for given (x1,x2) coordinates\ndef ManualTree(x1, x2):\n if x1 <= t1:\n if x2 <= t2:\n z = r[0]\n else:\n if x1 <= t3:\n z = r[3]\n else:\n z = r[4]\n else:\n if x2 <= t4:\n z = r[1]\n else:\n z = r[2]\n return(z)\n\nManualTree = np.vectorize(ManualTree)\n\nx = np.linspace(0,10,100)\nX, Y = np.meshgrid(x, x)\nZ = ManualTree(X.T,Y.T)\n\n#A 3D matrix for determining which colors go where.\n\ndef DivList(list1, den):\n return([e/den for e in list1])\n\n#This tells us which color we use for which output tree value. Intended to match with the latex tree graphic.\ndef ColorMapper(z):\n if z == r[0]:\n out = DivList([255.0, 0.0, 0.0], 255.0)\n elif z == r[1]:\n out = DivList([0.0, 0.0, 255.0], 255.0)\n elif z == r[2]:\n out = DivList([160.0, 32.0, 240.0], 255.0)\n elif z == r[3]:\n out = DivList([0.0, 100.0, 0.0], 255.0)\n else:\n out = DivList([255.0, 140.0, 0.0], 255.0)\n return(out)\n\n#Manually build the tree, one output tree value at a top. \n#For some spots, we need to add in the walls to show difference between two tree values.\nfig = plt.figure(figsize=(20.0/1.8, 15.0/1.8))\nax = fig.add_subplot(111, projection='3d')\nfor val in r: \n if val in [2, 4, 8]:\n logi = Z == val\n if val == 2:\n logi[50,0:70] = True\n logi[:51,70] = True\n shp = (51, 71)\n elif val == 4:\n logi[50:,30] = True\n shp = (50, 31)\n else:\n logi[30,70:] = True\n shp = (31, 30)\n x = X[logi].reshape(shp)\n y = Y[logi].reshape(shp)\n z = Z[logi].reshape(shp)\n else:\n x = X[Z==val]\n y = Y[Z==val]\n z = val\n ax.plot_wireframe(x, y, z, color=ColorMapper(val))\n \nax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 1.0))\nax.view_init(elev=30, azim=230)\nplt.savefig(os.path.join('figures', 'tree3d.pdf'))\n",
"# Fit a 2d MOG model using optimization over a Riemannian manifold.\n# as described in this paper http://arxiv.org/pdf/1506.07677v1.pdf\n# Code is slightly modified from\n# https://pymanopt.github.io/MoG.html\n\nimport autograd.numpy as np\nnp.set_printoptions(precision=2)\nimport matplotlib.pyplot as plt\n#%matplotlib inline\nfrom autograd.scipy.misc import logsumexp\nfrom pymanopt.manifolds import Product, Euclidean, PositiveDefinite\nfrom pymanopt import Problem\nfrom pymanopt.solvers import SteepestDescent, TrustRegions\n\n# Number of data\nN = 1000\n\n# Dimension of data\nD = 2\n\n# Number of clusters\nK = 3\n\n# True model parameters\npi = [0.1, 0.6, 0.3]\nmu = [np.array([-4, 1]), np.array([0, 0]), np.array([2, -1])]\nSigma = [np.array([[3, 0],[0, 1]]), np.array([[1, 1.], [1, 3]]), .5 * np.eye(2)]\n\n# Sample some data\ncomponents = np.random.choice(K, size=N, p=pi)\nsamples = np.zeros((N, D))\nfor k in range(K):\n # indices of current component in X\n indices = (k == components)\n # number of those occurrences\n n_k = indices.sum()\n if n_k > 0:\n samples[indices] = np.random.multivariate_normal(mu[k], Sigma[k], n_k)\n\n# Plot the data\ncolors = ['r', 'g', 'b', 'c', 'm']\nfor i in range(K):\n indices = (i == components)\n plt.scatter(samples[indices, 0], samples[indices, 1], alpha=.4, color=colors[i%K])\nplt.axis('equal')\nplt.show()\n\n\n\n# (1) Instantiate the manifold\nmanifold = Product([PositiveDefinite(D+1, k=K), Euclidean(K-1)])\n\n# (2) Define cost function\n# The parameters must be contained in a list theta.\ndef cost(theta):\n # Unpack parameters\n nu = np.concatenate([theta[1], [0]], axis=0)\n \n S = theta[0]\n logdetS = np.expand_dims(np.linalg.slogdet(S)[1], 1)\n y = np.concatenate([samples.T, np.ones((1, N))], axis=0)\n\n # Calculate log_q\n y = np.expand_dims(y, 0)\n \n # 'Probability' of y belonging to each cluster\n log_q = -0.5 * (np.sum(y * np.linalg.solve(S, y), axis=1) + logdetS)\n\n alpha = np.exp(nu)\n alpha = alpha / np.sum(alpha)\n alpha = np.expand_dims(alpha, 1)\n \n loglikvec = logsumexp(np.log(alpha) + log_q, axis=0)\n return -np.sum(loglikvec)\n\nproblem = Problem(manifold=manifold, cost=cost, verbosity=1)\n\n# (3) Instantiate a Pymanopt solver\n#solver = TrustRegions()\nsolver = SteepestDescent(logverbosity=1)\n\n# let Pymanopt do the rest\nXopt, optlog = solver.solve(problem)\nprint(optlog)\n\n# Inspect results\nmu_hat = Xopt[0]\nSigma_hat = Xopt[1]\nfor k in range(K):\n mu_est = Xopt[0][k][0:2, 2:3]\n Sigma_est = Xopt[0][k][:2, :2] - mu_est.dot(mu_est.T)\n print('k = {}'.format(k))\n print('true mu {}, est {}'.format(mu[k], np.ravel(mu_est)))\n \npihat = np.exp(np.concatenate([Xopt[1], [0]], axis=0))\npihat = pihat / np.sum(pihat)\nprint('true pi {}, est {}'.format(pi, pihat))\n\n",
"\"\"\"\nDemo of scatter plot with varying marker colors and sizes.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport matplotlib.cbook as cbook\nimport numpy as np\nimport os\nfrom utils.util import DATA_DIR\n\n# Load a numpy record array from yahoo csv data with fields date,\n# open, close, volume, adj_close from the mpl-data/example directory.\n# The record array stores python datetime.date as an object array in\n# the date column\ndatafile = os.path.join(DATA_DIR, 'goog.npy')\ntry:\n # Python3 cannot load python2 .npy files with datetime(object) arrays\n # unless the encoding is set to bytes. Hovever this option was\n # not added until numpy 1.10 so this example will only work with\n # python 2 or with numpy 1.10 and later.\n price_data = np.load(datafile, encoding='bytes').view(np.recarray)\nexcept TypeError:\n price_data = np.load(datafile).view(np.recarray)\n# price_data = np.load(datafile).view(np.recarray)\nprice_data = price_data[-250:] # get the most recent 250 trading days\n\ndelta1 = np.diff(price_data.adj_close)/price_data.adj_close[:-1]\n\n# Marker size in units of points^2\nvolume = (15 * price_data.volume[:-2] / price_data.volume[0])**2\nclose = 0.003 * price_data.close[:-2] / 0.003 * price_data.open[:-2]\n\nfig, ax = plt.subplots()\nax.scatter(delta1[:-1], delta1[1:], c=close, s=volume, alpha=0.5)\n\nax.set_xlabel(r'$\\Delta_i$', fontsize=20)\nax.set_ylabel(r'$\\Delta_{i+1}$', fontsize=20)\nax.set_title('Volume and percent change')\n\nax.grid(True)\nfig.tight_layout()\n\nplt.show()\n",
"\n# Based on https://github.com/probml/pmtk3/blob/master/demos/linregPolyVsN.m\nimport os\n#from utils.util import poly_data_make\nfrom sklearn.linear_model import Ridge\nfrom sklearn.preprocessing import MinMaxScaler \nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nTrueDeg = 2 #The true degree of the model\ndegrees = [1, 2, 14, 20] #The degrees of our design matrices\n \n#Function to expand from x to design matrix of degree deg\ndef ExpandtoDeg(x,deg):\n return np.array([x**i for i in range(deg+1)]).transpose().reshape(-1,deg+1)\n\ndef make_1dregression_data(n=21):\n np.random.seed(0)\n # Example from Romaine Thibaux\n xtrain = np.linspace(0, 20, n)\n xtest = np.arange(0, 20, 0.1)\n sigma2 = 4\n w = np.array([-1.5, 1/9.])\n fun = lambda x: w[0]*x + w[1]*np.square(x)\n # Apply function to make data\n ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \\\n np.sqrt(sigma2)\n ytestNoisefree = fun(xtest)\n ytestNoisy = ytestNoisefree + np.random.normal(0, 1, xtest.shape) * \\\n np.sqrt(sigma2)\n return xtrain, ytrain, xtest, ytestNoisefree, ytestNoisy, sigma2\n \ndef make_poly_regression_data(deg=2, n=21):\n np.random.seed(0)\n xtrain = np.linspace(-1, 1, n)\n xtest = np.arange(-1, 1, 0.01)\n sigma2 = 4\n fun = lambda x: (1 + x + np.power(x, deg))\n # Apply function to make data\n ytrain = fun(xtrain) + np.random.normal(0, 1, xtrain.shape) * \\\n np.sqrt(sigma2)\n ytestNoisefree = fun(xtest)\n ytestNoisy = ytestNoisefree + np.random.normal(0, 1, xtest.shape) * \\\n np.sqrt(sigma2)\n return xtrain, ytrain, xtest, ytestNoisefree, ytestNoisy, sigma2\n \n \nfor ModDeg in degrees:\n \n ns = np.round(np.linspace(10, 210, 20))\n \n err = []\n errtrain = []\n for n in ns:\n #Forming data\n #xtrain, ytrain, xtest, _, ytest, _ = poly_data_make(sampling='thibaux', deg=TrueDeg, n=n)\n xtrain, ytrain, xtest, _, ytest, _ = make_1dregression_data(n=n)\n #xtrain, ytrain, xtest, _, ytest, _ = make_poly_regression_data(deg=TrueDeg, n=n)\n \n #Rescaling data\n scaler = MinMaxScaler(feature_range=(-1, 1))\n xtrain = scaler.fit_transform(xtrain.reshape(-1, 1))\n xtest = scaler.transform(xtest.reshape(-1, 1))\n\n #Fitting ridge regression. Small differences in alpha near zero make a visual difference in the plot when n is close to 0.\n regr = Ridge(alpha=0, fit_intercept=False) #Using ridge instead of ordinary least squares for numerical stability\n XDesignTrain = ExpandtoDeg(xtrain, ModDeg)\n XDesignTest = ExpandtoDeg(xtest, ModDeg)\n regr.fit(XDesignTrain,ytrain) \n ypred = regr.predict(XDesignTest)\n err.append(np.mean((ytest-ypred)**2))\n errtrain.append(np.mean((ytrain-regr.predict(XDesignTrain))**2))\n \n #Plotting\n fig, ax = plt.subplots()\n ax.plot(ns, err, color = 'r', marker = 's',label='test')\n ax.plot(ns, errtrain, marker = 'x', label='train')\n ax.legend(loc='upper right', shadow=True)\n ax.set_xlim([0,200])\n ax.set_ylim([0,22])\n plt.axhline(y=4, color='k', linewidth=2)\n plt.xlabel('size of training set')\n plt.ylabel('mse')\n plt.title('truth = degree {}, model = degree {}'.format(TrueDeg, ModDeg))\n plt.savefig(os.path.join('figures', 'polyfitN{}.pdf'.format(ModDeg)),orientation='landscape')\n plt.draw()\n\nplt.show()\n"
] | [
[
"scipy.stats.beta.rvs",
"scipy.stats.beta.interval",
"numpy.sort",
"scipy.stats.beta.ppf",
"numpy.array"
],
[
"matplotlib.pyplot.figure",
"numpy.vectorize",
"numpy.linspace",
"numpy.meshgrid"
],
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis"
],
[
"numpy.load",
"matplotlib.pyplot.show",
"numpy.diff",
"matplotlib.pyplot.subplots"
],
[
"numpy.square",
"matplotlib.pyplot.axhline",
"numpy.sqrt",
"numpy.random.seed",
"numpy.linspace",
"numpy.power",
"numpy.arange",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.draw",
"sklearn.linear_model.Ridge",
"numpy.random.normal",
"numpy.mean",
"sklearn.preprocessing.MinMaxScaler",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
YasuShimizu/1D-Free-Water-Surface | [
"38a86e167b43ebe46187aa6e781a93414136235f"
] | [
"initial.py"
] | [
"import numpy as np\r\n\r\ndef eta_init(eta,eta0,eta_up,eta_up0,nx,dx, \\\r\n slope,xl,xb1,xb2,xb3,dbed):\r\n zb0=xl*slope\r\n for i in np.arange(0,nx+2):\r\n xx=dx*float(i)\r\n eta_up[i]=zb0-xx*slope\r\n eta_up0[i]=eta_up[i]\r\n# print(i,nx,eta_up[i])\r\n if xx>xb1 and xx<xb2:\r\n ss=xx-xb1\r\n deta=dbed*ss/(xb2-xb1)\r\n eta_up[i]=eta_up[i]+deta\r\n elif xx>=xb2 and xx<xb3:\r\n ss=xb3-xx\r\n deta=dbed*ss/(xb3-xb2)\r\n eta_up[i]=eta_up[i]+deta\r\n\r\n for i in np.arange(1,nx+2):\r\n eta[i]=(eta_up[i]+eta_up[i-1])*.5\r\n eta0[i]=(eta_up0[i]+eta_up0[i-1])*.5\r\n eta[0]=2.*eta[1]-eta[2]\r\n eta0[0]=2.*eta0[1]-eta0[2]\r\n return eta,eta0,eta_up,eta_up0\r\n\r\ndef eta_init_2(eta,eta0,eta_up,eta_up0,nx,dx, \\\r\n xl,x_slope,slope1,slope2):\r\n zb0=x_slope*slope1+(xl-x_slope)*slope2\r\n zb1=zb0-x_slope*slope1\r\n for i in np.arange(0,nx+2):\r\n xx=dx*float(i)\r\n if xx <= x_slope:\r\n eta_up[i]=zb0-xx*slope1\r\n else:\r\n eta_up[i]=zb1-(xx-x_slope)*slope2 \r\n eta_up0[i]=eta_up[i]\r\n\r\n for i in np.arange(1,nx+2):\r\n eta[i]=(eta_up[i]+eta_up[i-1])*.5\r\n eta0[i]=(eta_up0[i]+eta_up0[i-1])*.5\r\n eta[0]=2.*eta[1]-eta[2]\r\n eta0[0]=2.*eta0[1]-eta0[2]\r\n return eta,eta0,eta_up,eta_up0\r\n\r\ndef h_init(eta,eta0,eta_up,eta_up0,h,hs,h_up,hs_up, \\\r\n hs_upstm,hs_dwstm,nx,dx,xl):\r\n xhalf=xl*.95\r\n for i in np.arange(0,nx+1):\r\n xlen=i*dx\r\n if xlen<xhalf:\r\n hs_up[i]=hs_upstm\r\n else:\r\n hs_up[i]=hs_dwstm\r\n\r\n# hs_up[i]=hs_upstm+(hs_dwstm-hs_upstm)*xlen/xl\r\n h_up[i]=eta_up0[i]+hs_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=(hs_up[i]+hs_up[i-1])*.5\r\n h[i]=eta0[i]+hs[i]\r\n\r\n for i in np.arange(0,nx+1):\r\n hs_up[i]=h_up[i]-eta_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=h[i]-eta[i]\r\n\r\n hs[0]=hs_upstm; h[0]=eta[0]+hs_upstm\r\n hs[nx+1]=hs_dwstm; h[nx+1]=eta[nx+1]+hs_dwstm\r\n\r\n return h,hs,h_up,hs_up\r\n\r\ndef h_init_2(eta,eta0,eta_up,eta_up0,h,hs,h_up,hs_up, \\\r\n hs_upstm,hs_dwstm,nx,dx,xl,x_slope):\r\n\r\n for i in np.arange(0,nx+1):\r\n xlen=i*dx\r\n if xlen<x_slope:\r\n hs_up[i]=hs_upstm\r\n else:\r\n hs_up[i]=hs_dwstm\r\n\r\n# hs_up[i]=hs_upstm+(hs_dwstm-hs_upstm)*xlen/xl\r\n h_up[i]=eta_up0[i]+hs_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=(hs_up[i]+hs_up[i-1])*.5\r\n h[i]=eta0[i]+hs[i]\r\n\r\n for i in np.arange(0,nx+1):\r\n hs_up[i]=h_up[i]-eta_up[i]\r\n\r\n for i in np.arange(1,nx+1):\r\n hs[i]=h[i]-eta[i]\r\n\r\n hs[0]=hs_upstm; h[0]=eta[0]+hs_upstm\r\n hs[nx+1]=hs_dwstm; h[nx+1]=eta[nx+1]+hs_dwstm\r\n\r\n return h,hs,h_up,hs_up\r\n\r\ndef u_init(g,qp,u,hs_up,fr,nx):\r\n for i in np.arange(0,nx+1):\r\n u[i]=qp/hs_up[i]\r\n fr[i]=u[i]/np.sqrt(g*hs_up[i])\r\n# print(i,hs_up[i],u[i],fr[i])\r\n\r\n return u,fr\r\n\r\ndef x_cell_init(x_cell,x,dx,nx):\r\n for i in np.arange(1,nx+1):\r\n x_cell[i]=(x[i]+x[i-1])*.5\r\n x_cell[0]=x_cell[1]-dx\r\n x_cell[nx+1]=x_cell[nx]+dx\r\n\r\n return x_cell\r\n\r\ndef h0_cal(eta,eta_up,nx,dx,qp,snm,h0_up):\r\n for i in np.arange(1,nx):\r\n slope=(eta[i]-eta[i+1])/dx\r\n if slope<=0. :\r\n h0=0.\r\n else:\r\n h0=(qp*snm/np.sqrt(slope))**(.6)\r\n h0_up[i]=eta_up[i]+h0\r\n if i==1:\r\n h0_up[0]=eta_up[0]+h0\r\n elif i==nx-1:\r\n h0_up[nx]=eta_up[nx]+h0\r\n\r\n return h0_up\r\n"
] | [
[
"numpy.arange",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dennis-l/tolteca | [
"1dffaffb585eb7027e26b34ae01e8632bef134cb",
"1dffaffb585eb7027e26b34ae01e8632bef134cb",
"1dffaffb585eb7027e26b34ae01e8632bef134cb",
"1dffaffb585eb7027e26b34ae01e8632bef134cb"
] | [
"tolteca/simu/toltec/models.py",
"tolteca/recipes/play_tones.py",
"tolteca/simu0/toltec/atm.py",
"tolteca/simu/toltec/simulator.py"
] | [
"#!/usr/bin/env python\n\n\nfrom gwcs import coordinate_frames as cf\nimport astropy.units as u\nfrom astropy.time import Time\nfrom astropy.modeling import models, Parameter, Model\nfrom astropy.coordinates import SkyCoord, Angle\nfrom astropy.table import Table\nfrom astropy.cosmology import default_cosmology\nfrom astropy import constants as const\nfrom astropy.utils.decorators import classproperty\nfrom scipy.interpolate import interp1d\nfrom dataclasses import dataclass, field\nimport numpy as np\n\nfrom tollan.utils.dataclass_schema import add_schema\nfrom tollan.utils.log import timeit, get_logger\nfrom tollan.utils.fmt import pformat_yaml\nfrom kidsproc.kidsmodel import _Model as ComplexModel\nfrom contextlib import contextmanager, ExitStack\n\nfrom ...utils.common_schema import PhysicalTypeSchema\nfrom ...utils import get_pkg_data_path\nfrom .toltec_info import toltec_info\nfrom .lmt import get_lmt_atm_models\n\nfrom ..base import ProjModel, LabelFrame\nfrom ..sources.base import PowerLoadingModel\nfrom ..mapping.utils import rotation_matrix_2d, _get_skyoffset_frame\n\n\n__all__ = [\n 'pa_from_coords',\n 'ToltecArrayProjModel', 'ToltecSkyProjModel',\n 'KidsReadoutNoiseModel',\n 'ToltecArrayPowerLoadingModel',\n 'ToltecPowerLoadingModel'\n ]\n\n\ndef pa_from_coords(observer, coords_altaz, coords_icrs):\n \"\"\"Calculate parallactic angle at coords.\n\n \"\"\"\n # TODO: revisit this\n # http://star-www.st-and.ac.uk/~fv/webnotes/chapter7.htm\n # note that their are issues with these values\n # where cosha^2 + sinha^2 is off from 1. by 0.1%. This\n # gives about 0.7 deg of deviation from the direct\n # calculation using LST from time_obs\n cosha = (\n np.sin(coords_altaz.alt.radian)\n - np.sin(coords_icrs.dec.radian)\n * np.sin(observer.location.lat.radian)) / (\n np.cos(coords_icrs.dec.radian)\n * np.cos(observer.location.lat.radian)\n )\n sinha = (\n -np.sin(coords_altaz.az.radian)\n * np.cos(coords_altaz.alt.radian)\n / np.cos(coords_icrs.dec.radian)\n )\n # print(sinha ** 2 + cosha ** 2 - 1)\n parallactic_angle = Angle(np.arctan2(\n sinha,\n (\n np.tan(observer.location.lat.radian)\n * np.cos(coords_icrs.dec.radian)\n - np.sin(coords_icrs.dec.radian)\n * cosha)\n ) << u.rad)\n return parallactic_angle\n\n\nclass ToltecArrayProjModel(ProjModel):\n \"\"\"\n A model to transform TolTEC detector locations and orientations on the\n each array to a common TolTEC instrument frame defined in offset angle\n unit, with the extent of arrays normalized to the size of the on-sky\n field of view.\n\n The TolTEC frame is attached to the TolTEC instrument body and describes\n the projected positions and orientations of all detectors on the sky. 
The\n origin of the TolTEC frame is fixed at the telescope bore sight.\n\n The two axes az_offset and alt_offset is aligned with the telescope\n Az/Alt at altitude of 0 deg, and they rotate by the value of the altitude\n following the left hand rule.\n\n The orientations of detectors also get projected to the TolTEC frame,\n where the P.A = 0 is set to be the +alt_offset and the sign convention\n follows the left hand rule.\n \"\"\"\n\n input_frame = cf.CompositeFrame([\n cf.Frame2D(\n name='det_pos',\n axes_names=(\"x\", \"y\"),\n unit=(u.um, u.um),\n ),\n LabelFrame(\n axes_names=['array', 'fg'], axes_order=(2, 3),\n name='det_prop'),\n ], name='focal_plane')\n output_frame = cf.CompositeFrame([\n cf.Frame2D(\n name='sky_offset',\n axes_names=(\"az_offset\", \"alt_offset\"),\n unit=(u.deg, u.deg)),\n cf.CoordinateFrame(\n naxes=1,\n axes_type='SPATIAL',\n axes_order=(2, ),\n unit=(u.deg, ),\n axes_names=(\"pa\", ),\n name='det_pa'),\n ], name='toltec')\n n_inputs = input_frame.naxes\n n_outputs = output_frame.naxes\n\n _array_index_to_mounting_angle = {\n toltec_info[array_name]['index']:\n toltec_info[array_name]['array_mounting_angle']\n for array_name in toltec_info['array_names']\n }\n\n _fg_to_det_pa = {\n toltec_info[fg_name]['index']:\n toltec_info[fg_name]['det_pa']\n for fg_name in toltec_info['fg_names']\n }\n\n _plate_scale = toltec_info['fov_diameter'] \\\n / toltec_info['array_physical_diameter']\n # this is need to make the affine transform work correctly\n _plate_unit = toltec_info['array_physical_diameter'].unit\n\n _mat_refl = np.array([[1, 0], [0, -1]], dtype='d')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # build the models for transforming x and y\n m_pos = dict()\n m_pa = dict()\n for ai, rot in self._array_index_to_mounting_angle.items():\n m_pos[ai] = models.AffineTransformation2D(\n (\n rotation_matrix_2d(rot.to_value(u.rad)) @ self._mat_refl\n ) << self._plate_unit,\n translation=(0., 0.) 
<< self._plate_unit\n ) | (\n models.Multiply(self._plate_scale) &\n models.Multiply(self._plate_scale)\n )\n for fg, pa in self._fg_to_det_pa.items():\n m_pa[(ai, fg)] = models.Const1D(pa + rot)\n m_proj = dict()\n for k, m in m_pa.items():\n # build the full proj model\n # k[0] is array index\n m_proj[k] = models.Mapping((0, 1, 0)) | m_pos[k[0]] & m_pa[k]\n self._m_pos = m_pos\n self._m_pa = m_pa\n self._m_proj = m_proj\n\n def evaluate(self, x, y, array, fg):\n # note that both array and fg are coerced to double and\n # we need to make them int before creating the masks\n array = array.astype(int)\n fg = fg.astype(int)\n # loop over proj models and populate result\n result = np.empty((self.n_outputs, ) + x.shape, dtype='d') << u.deg\n # this is used to check if all values are covered\n not_computed = np.ones(x.shape, dtype=bool)\n for k, m in self._m_proj.items():\n mask = (array == k[0]) & (fg == k[1])\n result[0, mask], result[1, mask], result[2, mask] = m(\n x[mask], y[mask])\n not_computed[mask] = False\n if np.sum(not_computed) > 0:\n invalid = np.unique(\n np.vstack([\n array[not_computed],\n fg[not_computed]]),\n axis=1\n ).T\n raise ValueError(\n f\"Invalid (array, fg) in input: {invalid}\")\n # apply the transformation for each unit\n return result\n\n\nclass ToltecSkyProjModel(ProjModel):\n \"\"\"\n A model to transform TolTEC detector positions and orientations\n expressed in offset angular unit in the TolTEC frame to\n absolute world coordinates for given telescope bore sight target\n and time of obs.\n\n The output coordinate frame is a generic sky lon/lat frame which\n can represent any of the valid celestial coordinate frames supported,\n by specifying the ``evaluate_frame`` keyword argument.\n \"\"\"\n\n logger = get_logger()\n\n def __init__(\n self,\n origin_coords_icrs=None,\n origin_coords_altaz=None,\n time_obs=None):\n origin_coords_icrs, origin_coords_altaz, \\\n origin_az, origin_alt, mjd = self._make_origin_coords(\n origin_coords_icrs=origin_coords_icrs,\n origin_coords_altaz=origin_coords_altaz,\n time_obs=time_obs,\n ensure_altaz=True,\n ensure_icrs=True,\n return_params=True,\n )\n if np.isscalar(mjd):\n n_models = 1\n else:\n n_models = len(mjd)\n super().__init__(\n origin_az=origin_az, origin_alt=origin_alt, mjd=mjd,\n n_models=n_models)\n self._origin_coords_icrs = origin_coords_icrs\n self._origin_coords_altaz = origin_coords_altaz\n # this is to be overridden by the __call__ so that we can\n # ensure the evaluation is always done with __call__\n self._eval_context = None\n\n def __setattr__(self, attr, value):\n # since we cache the origin coords and we need to disallow\n # changing the params to make all of the values in-sync.\n if attr in ('origin_az', 'origin_alt', 'mjd'):\n raise AttributeError(f'{attr} is read-only')\n return super().__setattr__(attr, value)\n\n @classmethod\n def _make_origin_coords(\n cls,\n origin_coords_icrs, origin_coords_altaz, time_obs,\n ensure_altaz=True,\n ensure_icrs=True,\n return_params=True,\n ):\n if sum([origin_coords_altaz is None, origin_coords_icrs is None]) == 2:\n raise ValueError(\n \"at least one of origin_coords_{altaz,icrs} is needed.\")\n if origin_coords_altaz is None and (ensure_altaz or return_params):\n # compute origin altaz from icrs and time_obs\n if time_obs is None:\n raise ValueError(\"time is need to transform to altaz.\")\n with timeit(\"transform origin from icrs to altaz\"):\n origin_coords_altaz = origin_coords_icrs.transform_to(\n cls.observer.altaz(time=time_obs))\n if origin_coords_icrs 
is None and ensure_icrs:\n # compute origin icrs from altaz\n with timeit(\"transform origin from altaz to icrs\"):\n origin_coords_icrs = origin_coords_altaz.transform_to(\"icrs\")\n if return_params:\n origin_az = origin_coords_altaz.az\n origin_alt = origin_coords_altaz.alt\n mjd = (origin_coords_altaz.frame.obstime.mjd) << u.day\n return (\n origin_coords_icrs, origin_coords_altaz,\n origin_az, origin_alt, mjd)\n return (origin_coords_icrs, origin_coords_altaz)\n\n input_frame = ToltecArrayProjModel.output_frame\n output_frame = cf.CompositeFrame([\n cf.Frame2D(\n name='sky',\n axes_names=(\"lon\", \"lat\"),\n unit=(u.deg, u.deg)),\n cf.CoordinateFrame(\n naxes=1,\n axes_type='SPATIAL',\n axes_order=(2, ),\n unit=(u.deg, ),\n axes_names=(\"pa\", ),\n name='det_pa'),\n ], name='sky')\n\n n_inputs = input_frame.naxes\n n_outputs = output_frame.naxes\n\n origin_az = Parameter(\n default=180.,\n unit=output_frame.unit[0],\n description='The Az of the telescope bore sight.'\n )\n origin_alt = Parameter(\n default=60.,\n unit=output_frame.unit[1],\n description='The Alt of the telescope bore sight.'\n )\n mjd = Parameter(\n default=Time(2022.0, format='jyear').mjd,\n unit=u.day,\n description='The UT of observation expressed in MJD.'\n )\n\n observer = toltec_info['site']['observer']\n \"\"\"The observer (LMT).\"\"\"\n\n @classmethod\n def _get_altaz_frame(cls, mjd):\n return cls.observer.altaz(time=Time(mjd, format='mjd'))\n\n @classmethod\n def _get_origin_coords_altaz(cls, origin_az, origin_alt, mjd):\n \"\"\"Return the origin coordinates in AltAz.\"\"\"\n return SkyCoord(\n origin_az,\n origin_alt,\n frame=cls._get_altaz_frame(mjd)\n )\n\n @classmethod\n @timeit\n def _get_altaz_offset_frame(cls, origin_coords_altaz):\n \"\"\"Return the sky offset frame in AltAz centered at origin.\"\"\"\n return _get_skyoffset_frame(origin_coords_altaz)\n\n @classmethod\n @timeit\n def evaluate_altaz(\n cls, x, y, pa,\n origin_coords_icrs=None,\n origin_coords_altaz=None,\n time_obs=None):\n \"\"\"Compute the projected coordinates in AltAz using full\n transformation.\n \"\"\"\n _, origin_coords_altaz = cls._make_origin_coords(\n origin_coords_icrs=origin_coords_icrs,\n origin_coords_altaz=origin_coords_altaz,\n time_obs=time_obs,\n ensure_altaz=True,\n ensure_icrs=False,\n return_params=False,\n )\n # now we always have origin_coords_altaz\n with timeit(\"apply rotation to detector offset coords\"):\n origin_alt = origin_coords_altaz.alt\n # The first step has to be rotation the toltec frame by\n # the amount of origin_coords_altaz.alt, due to the M3 mirror.\n mat_rot_m3 = rotation_matrix_2d(origin_alt.to_value(u.rad))\n\n # there should be more clever way of this but for now\n # we just spell out the rotation because x and y are already\n # separated arrays\n x_offset_altaz = mat_rot_m3[0, 0] * x + mat_rot_m3[0, 1] * y\n y_offset_altaz = mat_rot_m3[1, 0] * x + mat_rot_m3[1, 1] * y\n # y_offset_altaz = mat_rot_m3[1, 0][:, np.newaxis] \\\n # * x[np.newaxis, :] \\\n # + mat_rot_m3[1, 1][:, np.newaxis] * y[np.newaxis, :]\n # the pa get rotated by the value of alt\n pa_altaz = (pa + origin_alt).to(u.deg)\n\n # now do the coordinate transformation\n with timeit(\"transform detector offset coords to altaz\"):\n altaz_offset_frame = cls._get_altaz_offset_frame(\n origin_coords_altaz)\n det_coords_altaz_offset = SkyCoord(\n x_offset_altaz, y_offset_altaz, frame=altaz_offset_frame)\n det_coords_altaz = det_coords_altaz_offset.transform_to(\n origin_coords_altaz.frame)\n return det_coords_altaz.az, 
det_coords_altaz.alt, pa_altaz\n\n @classmethod\n @timeit\n def evaluate_icrs_fast(\n cls, x, y, pa,\n origin_coords_icrs=None,\n origin_coords_altaz=None,\n time_obs=None):\n \"\"\"Compute the projected coordinates in ICRS with small field\n approximation (TolTEC FOV is small ~4 arcmin) directly.\n \"\"\"\n origin_coords_icrs, origin_coords_altaz = cls._make_origin_coords(\n origin_coords_icrs=origin_coords_icrs,\n origin_coords_altaz=origin_coords_altaz,\n time_obs=time_obs,\n ensure_altaz=True,\n ensure_icrs=True,\n return_params=False,\n )\n with timeit(\"compute rotation angle from toltec frame to icrs\"):\n origin_par_angle = cls.observer.parallactic_angle(\n origin_coords_altaz.obstime,\n origin_coords_icrs)\n # now we can rotate the x y and pa by alt + par_ang\n rot = origin_coords_altaz.alt + origin_par_angle\n\n with timeit(\"apply rotation to detector offset coords\"):\n # The first step has to be rotation the toltec frame by\n # the amount of origin_alt, due to the M3 mirror.\n mat_rot_m3 = rotation_matrix_2d(rot.to_value(u.rad))\n\n # there should be more clever way of this but for now\n # we just spell out the rotation because x and y are already\n # separated arrays\n x_offset_icrs = mat_rot_m3[0, 0][:, np.newaxis] \\\n * x[np.newaxis, :] \\\n + mat_rot_m3[0, 1][:, np.newaxis] * y[np.newaxis, :]\n y_offset_icrs = mat_rot_m3[1, 0][:, np.newaxis] \\\n * x[np.newaxis, :] \\\n + mat_rot_m3[1, 1][:, np.newaxis] * y[np.newaxis, :]\n # the pa get rotated by the value of rot\n pa_icrs = pa + rot\n\n with timeit(\"transform detector offset coords to icrs\"):\n # now we need to build the icrs offset frame and transform back to\n # absolute coordinates\n icrs_offset_frame = _get_skyoffset_frame(origin_coords_icrs)\n\n det_coords_icrs_offset = SkyCoord(\n x_offset_icrs, y_offset_icrs, frame=icrs_offset_frame)\n det_coords_icrs = det_coords_icrs_offset.transform_to(\n origin_coords_icrs.frame)\n return det_coords_icrs.ra, det_coords_icrs.dec, pa_icrs\n\n @staticmethod\n def _check_frame_by_name(frame, frame_name):\n if isinstance(frame, str):\n return frame == frame_name\n return frame.name == frame_name\n\n @timeit\n def evaluate(\n self,\n x, y, pa, origin_az, origin_alt, mjd):\n # make sure we have _eval_context set before proceed\n eval_ctx = self._eval_context\n if eval_ctx is None:\n raise ValueError(\"This model can only be evaluated with __call__\")\n evaluate_frame = eval_ctx['evaluate_frame']\n\n # create origin coords in altaz\n origin_coords_altaz = self._get_origin_coords_altaz(\n origin_az=origin_az, origin_alt=origin_alt,\n mjd=mjd)\n\n result_altaz = self.evaluate_altaz(\n x, y, pa, origin_coords_altaz=origin_coords_altaz)\n\n # update evaluate_context\n result_az, result_alt, pa_altaz = result_altaz\n coords_altaz = SkyCoord(\n az=result_az, alt=result_alt, frame=origin_coords_altaz.frame\n )\n eval_ctx['pa_altaz'] = pa_altaz\n eval_ctx['coords_altaz'] = coords_altaz\n\n if self._check_frame_by_name(evaluate_frame, 'altaz'):\n return result_altaz\n elif self._check_frame_by_name(evaluate_frame, 'icrs'):\n # TODO the handling of other frame for the PA has to be on a\n # per-frame basis? 
So we only implement for now the ICRS\n with timeit(\"transform detector coords from altaz to icrs\"):\n coords_icrs = coords_altaz.transform_to('icrs')\n # calculate the par angle between the two set of coords\n dpa_altaz_icrs = pa_from_coords(\n observer=self.observer,\n coords_altaz=coords_altaz,\n coords_icrs=coords_icrs)\n pa_icrs = pa_altaz + dpa_altaz_icrs\n eval_ctx['pa_icrs'] = pa_icrs\n eval_ctx['coords_icrs'] = coords_icrs\n eval_ctx['dpa_altaz_icrs'] = dpa_altaz_icrs\n return coords_icrs.ra, coords_icrs.dec, pa_icrs\n else:\n raise ValueError(f\"invalid evaluate_frame {evaluate_frame}\")\n\n @timeit('toltec_sky_proj_evaluate')\n def __call__(\n self, *args,\n evaluate_frame='icrs',\n use_evaluate_icrs_fast=False,\n return_eval_context=False):\n\n result_eval_context = dict(\n evaluate_frame=evaluate_frame,\n )\n\n @contextmanager\n def _set_eval_context():\n nonlocal result_eval_context\n self._eval_context = result_eval_context\n yield\n self._eval_context = None\n\n def wrap_return(result):\n nonlocal result_eval_context\n if return_eval_context:\n return result, result_eval_context\n return result\n\n with _set_eval_context():\n if self._check_frame_by_name(evaluate_frame, 'icrs') and \\\n use_evaluate_icrs_fast:\n # use the fast icrs eval\n return wrap_return(self.evaluate_icrs_fast(\n *args,\n origin_coords_altaz=self._origin_coords_altaz,\n origin_coords_icrs=self._origin_coords_icrs,\n ))\n return wrap_return(super().__call__(*args))\n\n # TODO this is to override the default behavior of checking the model\n # axis. We allow the model axis to broadcasted with size=1.\n def _validate_input_shape(\n self, _input, idx, argnames, model_set_axis, check_model_set_axis):\n \"\"\"\n Perform basic validation of a single model input's shape\n -- it has the minimum dimensions for the given model_set_axis\n\n Returns the shape of the input if validation succeeds.\n \"\"\"\n input_shape = np.shape(_input)\n # Ensure that the input's model_set_axis matches the model's\n # n_models\n if input_shape and check_model_set_axis:\n # Note: Scalar inputs *only* get a pass on this\n if len(input_shape) < model_set_axis + 1:\n raise ValueError(\n f\"For model_set_axis={model_set_axis},\"\n f\" all inputs must be at \"\n f\"least {model_set_axis + 1}-dimensional.\")\n if input_shape[model_set_axis] > 1 and (\n input_shape[model_set_axis] != self._n_models):\n try:\n argname = argnames[idx]\n except IndexError:\n # the case of model.inputs = ()\n argname = str(idx)\n\n raise ValueError(\n f\"Input argument '{argname}' does not have the correct \"\n f\"dimensions in model_set_axis={model_set_axis} for a \"\n f\"model set with \"\n f\"n_models={self._n_models}.\")\n return input_shape\n\n\nclass KidsReadoutNoiseModel(ComplexModel):\n \"\"\"\n A model of the TolTEC KIDs readout noise.\n\n \"\"\"\n logger = get_logger()\n\n n_inputs = 1\n n_outputs = 1\n\n def __init__(self, scale_factor=1.0, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._inputs = ('S21', )\n self._outputs = ('dS21', )\n self._scale_factor = scale_factor\n\n def evaluate(self, S21):\n n = self._scale_factor\n shape = S21.shape\n dI = np.random.normal(0, n, shape)\n dQ = np.random.normal(0, n, shape)\n return dI + 1.j * dQ\n\n def evaluate_tod(self, apt, S21):\n \"\"\"Make readout noise in ADU.\"\"\"\n\n dS21 = self(S21)\n dS21 = dS21 * apt['sigma_readout'][:, np.newaxis]\n return dS21\n\n\ndef _get_default_passbands():\n \"\"\"Return the default TolTEC passband tables as a dict.\n \"\"\"\n from ...cal.toltec import 
ToltecPassband\n calobj = ToltecPassband.from_indexfile(get_pkg_data_path().joinpath(\n 'cal/toltec_passband/index.yaml'\n ))\n result = dict()\n for array_name in calobj.array_names:\n result[array_name] = calobj.get(array_name=array_name)\n return result\n\n\nclass ToltecArrayPowerLoadingModel(Model):\n \"\"\"\n A model of the LMT optical loading at the TolTEC arrays.\n\n This is based on the Mapping-speed-calculator\n \"\"\"\n\n # TODO allow overwriting these per instance.\n _toltec_passbands = _get_default_passbands()\n _cosmo = default_cosmology.get()\n\n logger = get_logger()\n\n n_inputs = 1\n n_outputs = 2\n\n @property\n def input_units(self):\n return {self.inputs[0]: u.deg}\n\n def __init__(self, array_name, atm_model_name='am_q50', *args, **kwargs):\n super().__init__(name=f'{array_name}_loading', *args, **kwargs)\n self._inputs = ('alt', )\n self._outputs = ('P', 'nep')\n self._array_name = array_name\n self._array_info = toltec_info[array_name]\n self._passband = self._toltec_passbands[array_name]\n self._f = self._passband['f'].quantity\n # check the f step, they shall be uniform\n df = np.diff(self._f).value\n if np.std(df) / df[0] > 1e-7:\n raise ValueError(\n \"invalid passband format, frequency grid has to be uniform\")\n self._df = self._f[1] - self._f[0]\n self._throughput = self._passband['throughput']\n if atm_model_name is not None:\n self._atm_model, self._atm_tx_model = get_lmt_atm_models(\n name=atm_model_name)\n else:\n self._atm_model = None\n # we still need the atm transmission for calculating efficiency\n # TODO revisit this\n _, self._atm_tx_model = get_lmt_atm_models(\n name='am_q50')\n\n @property\n def has_atm_model(self):\n return self._atm_model is not None\n\n @classproperty\n def _internal_params(cls):\n \"\"\"Lower level instrument parameters for LMT/TolTEC.\n\n Note that all these values does not take into account the\n passbands, and are frequency independent.\n \"\"\"\n # TODO merge this to the instrument fact yaml file?\n p = {\n 'det_optical_efficiency': 0.8,\n 'det_noise_factor': 0.334,\n 'horn_aperture_efficiency': 0.35,\n 'tel_diameter': 48. << u.m,\n 'tel_surface_rms': 76. << u.um,\n 'tel_emissivity': 0.06,\n 'T_coldbox': 5.75 << u.K,\n 'T_tel': 273. << u.K, # telescope ambient temperature\n 'T_coupling_optics': 290. << u.K, # coupling optics\n }\n # derived values\n p['tel_area'] = np.pi * (p['tel_diameter'] / 2.) ** 2\n # effective optics temperature due to telescope and the coupling\n p['T_warm'] = (\n p['tel_emissivity'] * p['T_tel']\n # TODO add documents for the numbers here\n + 3. * p['T_coupling_optics'] * 0.01\n )\n # cold efficiency is the efficiency inside the cold box.\n p['cold_efficiency'] = (\n p['det_optical_efficiency'] * p['horn_aperture_efficiency'])\n # effetive temperature at detectors for warm components through\n # the cold box\n p['T_det_warm'] = (p['T_warm'] * p['cold_efficiency'])\n # effetive temperature at detectors for cold box\n # note that the \"horn aperture efficiency\" is actually the\n # internal system aperture efficiency since it includes the\n # truncation of the lyot stop and the loss to the cold optics\n p['T_det_coldbox'] = (\n p['T_coldbox'] * p['det_optical_efficiency']\n * (1. 
- p['horn_aperture_efficiency'])\n )\n return p\n\n @property\n def _tel_primary_surface_optical_efficiency(self):\n \"\"\"The telescope optical efficiency due to RMS of the\n primary surface over the passband.\n\n This is just the Ruze formula.\n \"\"\"\n tel_surface_rms = self._internal_params['tel_surface_rms']\n f = self._f\n return np.exp(-((4.0 * np.pi * tel_surface_rms)/(const.c / f)) ** 2)\n\n @property\n def _system_efficiency(self):\n \"\"\"The overall system efficiency over the passband.\"\"\"\n return (\n self._tel_primary_surface_optical_efficiency\n * self._internal_params['cold_efficiency']\n * self._throughput\n )\n\n @staticmethod\n def _wsum(q, w):\n \"\"\"Return weighted sum of some quantity.\n\n q : `astropy.units.Quantity`\n The quantity.\n\n w : float\n The wegith.\n \"\"\"\n if w.ndim > 1:\n raise ValueError(\"weight has to be 1d\")\n return np.nansum(q * w, axis=-1) / np.nansum(w)\n\n def _get_T_atm(\n self, alt,\n return_avg=False):\n \"\"\"Return the atmosphere temperature.\n\n This is the \"true\" temperature without taking into account the system\n efficiency.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the weighted sum over the passband instead.\n \"\"\"\n atm_model = self._atm_model\n if atm_model is None:\n return np.squeeze(np.zeros((alt.size, self._f.size)) << u.K)\n # here we put the alt on the first axis for easier reduction on f.\n T_atm = atm_model(*np.meshgrid(self._f, alt, indexing='ij')).T\n if return_avg:\n T_atm = self._wsum(T_atm, self._throughput)\n T_atm = np.squeeze(T_atm)\n return T_atm\n\n def _get_tx_atm(self, alt):\n \"\"\"Return the atmosphere transmission.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n \"\"\"\n atm_tx_model = self._atm_tx_model\n # here we put the alt on the first axis for easier reduction on f.\n tx_atm = atm_tx_model(*np.meshgrid(self._f, alt, indexing='ij')).T\n tx_atm = np.squeeze(tx_atm)\n return tx_atm\n\n def _get_T(\n self, alt,\n return_avg=False\n ):\n \"\"\"Return the effective temperature at altitude `alt`, as seen\n by the cryostat.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the weighted sum over the passband instead.\n \"\"\"\n T_atm = self._get_T_atm(alt, return_avg=False)\n # add the telescope warm component temps\n T_tot = T_atm + self._internal_params['T_warm']\n if return_avg:\n T_tot = self._wsum(T_tot, self._system_efficiency)\n return T_tot\n\n def _get_T_det(\n self, alt,\n return_avg=True):\n \"\"\"Return the effective temperature seen by the detectors\n at altitude `alt`.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the weighted sum over the passband instead.\n \"\"\"\n T_atm = self._get_T_atm(alt, return_avg=False)\n # TODO why no telescope efficiency term?\n T_det = (\n T_atm * self._internal_params['cold_efficiency']\n + self._internal_params['T_det_warm']\n + self._internal_params['T_det_coldbox']\n ) * self._throughput\n if return_avg:\n # note this is different from the Detector.py in that\n # does not mistakenly (?) 
average over the passband again\n T_det = np.mean(T_det)\n return T_det\n\n def _T_to_dP(self, T):\n \"\"\"Return the Rayleigh-Jeans power for the passband frequency bins.\n\n Parameters\n ----------\n T : `astropy.units.Quantity`\n The temperature.\n \"\"\"\n # power from RJ source in frequency bin df\n # TODO this can be done this way because we ensured df is contant\n # over the passband.\n # we may change this to trapz to allow arbitrary grid?\n return const.k_B * T * self._df\n\n def _T_to_dnep(self, T):\n \"\"\"Return the photon noise equivalent power in W / sqrt(Hz) for\n the passband frequency bins.\n \"\"\"\n f = self._f\n df = self._df\n dP = self._T_to_dP(T)\n\n shot = 2. * const.k_B * T * const.h * f * df\n wave = 2. * dP ** 2 / df\n return np.sqrt(shot + wave)\n\n def _T_to_dnet_cmb(self, T, tx_atm):\n \"\"\"Return the noise equivalent CMB temperature in K / sqrt(Hz) for\n the passband frequency bins.\n\n Parameters\n ----------\n T : `astropy.units.Quantity`\n The temperature.\n tx_atm : array\n The atmosphere transmission.\n \"\"\"\n f = self._f\n df = self._df\n Tcmb = self._cosmo.Tcmb(0)\n\n dnep = self._T_to_dnep(T)\n x = const.h * f / (const.k_B * Tcmb)\n net_integrand = (\n (const.k_B * x) ** 2.\n * (1. / const.k_B)\n * np.exp(x) / (np.expm1(x)) ** 2.\n )\n dnet = dnep / (\n np.sqrt(2.0)\n * self._system_efficiency\n * net_integrand\n * df)\n # scale by the atmosphere transmission so this is comparable\n # to astronomical sources.\n return dnet / tx_atm\n\n def _dnep_to_dnefd(self, dnep, tx_atm):\n \"\"\"Return the noise equivalent flux density in Jy / sqrt(Hz) for\n the passband frequency bins.\n\n Parameters\n ----------\n T : `astropy.units.Quantity`\n The temperature.\n tx_atm : array\n The atmosphere transmission.\n \"\"\"\n df = self._df\n A = self._internal_params['tel_area']\n # TODO Z. Ma: I combined the sqrt(2) term. need to check the eqn here.\n dnefd = (\n dnep\n / (A * df)\n / self._system_efficiency\n * np.sqrt(2.))\n # scale by the atmosphere transmission so this is comparable\n # to astronomical sources.\n return dnefd / tx_atm # Jy / sqrt(Hz)\n\n def _get_P(self, alt):\n \"\"\"Return the detector power loading at altitude `alt`.\n\n \"\"\"\n T_det = self._get_T_det(alt=alt, return_avg=False)\n return np.nansum(self._T_to_dP(T_det), axis=-1).to(u.pW)\n\n def _get_dP(self, alt, f_smp):\n \"\"\"Return the detector power loading uncertainty according to the nep\n \"\"\"\n return (\n self._get_noise(alt)['nep']\n * np.sqrt(f_smp / 2.)).to(u.pW)\n\n def _get_noise(self, alt, return_avg=True):\n \"\"\"Return the noise at altitude `alt`.\n\n Parameters\n ----------\n alt : `astropy.units.Quantity`\n The altitude.\n return_avg : bool, optional\n If True, return the value integrated for the passband.\n \"\"\"\n # noise calculations\n # strategy is to do this for each frequency bin and then do a\n # weighted average across the band. This is copied directly from\n # Sean's python code.\n T_det = self._get_T_det(alt=alt, return_avg=False)\n dnep_phot = self._T_to_dnep(T_det)\n\n # detector noise factor coefficient\n det_noise_coeff = np.sqrt(\n 1. 
\n            1. + self._internal_params['det_noise_factor'])\n\n        dnep = dnep_phot * det_noise_coeff\n\n        # atm transmission\n        tx_atm = self._get_tx_atm(alt)\n        # the equivalent noise in astronomical units\n        dnet_cmb = (\n            self._T_to_dnet_cmb(T_det, tx_atm=tx_atm)\n            * det_noise_coeff\n            )\n        dnefd = self._dnep_to_dnefd(dnep, tx_atm=tx_atm)\n\n        if return_avg:\n            # integrate these up\n            net_cmb = np.sqrt(1.0 / np.nansum(dnet_cmb ** (-2.0), axis=-1))\n            nefd = np.sqrt(1.0 / np.nansum(dnefd ** (-2.0), axis=-1))\n            # nep is sum of squares\n            nep = np.sqrt(np.nansum(dnep ** 2.0, axis=-1))\n            # power just adds\n            return {\n                'net_cmb': net_cmb.to(u.mK * u.Hz ** -0.5),\n                'nefd': nefd.to(u.mJy * u.Hz ** -0.5),\n                'nep': nep.to(u.aW * u.Hz ** -0.5)\n                }\n        # return the per-frequency-bin values when not averaging\n        return {\n            'dnet_cmb': dnet_cmb.to(u.mK * u.Hz ** -0.5),\n            'dnefd': dnefd.to(u.mJy * u.Hz ** -0.5),\n            'dnep': dnep.to(u.aW * u.Hz ** -0.5)\n            }\n\n    def make_summary_table(self, alt=None):\n        \"\"\"Return a summary table for a list of altitudes.\n        \"\"\"\n        if alt is None:\n            alt = [50., 60., 70.] << u.deg\n        result = dict()\n        result['P'] = self._get_P(alt)\n        result.update(self._get_noise(alt, return_avg=True))\n        return Table(result)\n\n    def evaluate(self, alt):\n        P = self._get_P(alt)\n        nep = self._get_noise(alt, return_avg=True)['nep']\n        return P, nep\n\n    def sky_sb_to_pwr(self, det_s):\n        \"\"\"Return detector power loading for given on-sky surface brightness.\n        \"\"\"\n        # note that this is approximate using a square passband.\n        wl_center = self._array_info['wl_center']\n        pb_width = self._array_info['passband']\n        tb = det_s.to(\n            u.K,\n            equivalencies=u.brightness_temperature(\n                wl_center))\n        p = (\n            tb.to(\n                u.J,\n                equivalencies=u.temperature_energy())\n            * pb_width\n            ).to(u.pW)\n        # the sys eff is also approximate\n        sys_eff = self._wsum(\n            self._system_efficiency, self._throughput\n            )\n        return p * sys_eff\n\n    @contextmanager\n    def eval_interp_context(self, alt_grid):\n        interp_kwargs = dict(kind='linear')\n        with timeit(\n                f\"setup power loading model for {self._array_name} \"\n                f\"eval interp context with \"\n                f\"alt_grid=[{alt_grid.min()}:{alt_grid.max()}] \"\n                f\"size={len(alt_grid)}\"):\n            self._p_pW_interp = interp1d(\n                alt_grid.to_value(u.deg),\n                self._get_P(alt_grid).to_value(u.pW),\n                **interp_kwargs\n                )\n            one_Hz = 1 << u.Hz\n            self._dp_pW_interp_unity_f_smp = interp1d(\n                alt_grid.to_value(u.deg),\n                self._get_dP(alt_grid, one_Hz).to_value(u.pW),\n                **interp_kwargs\n                )\n        yield self\n        self._p_pW_interp = None\n        self._dp_pW_interp_unity_f_smp = None\n\n    def evaluate_tod(\n            self,\n            det_alt,\n            f_smp=1 << u.Hz,\n            random_seed=None,\n            return_realized_noise=True,\n            ):\n        \"\"\"Return the array power loading along with the noise.\"\"\"\n\n        if self._p_pW_interp is None:\n            # no interp, direct eval\n            det_pwr = self._get_P(np.ravel(det_alt)).to(u.pW).reshape(\n                det_alt.shape)\n            det_delta_pwr = self._get_dP(np.ravel(det_alt), f_smp).reshape(\n                det_alt.shape).to(u.pW)\n        else:\n            det_pwr = self._p_pW_interp(det_alt.degree) << u.pW\n            one_Hz = 1. << u.Hz\n            det_delta_pwr = (self._dp_pW_interp_unity_f_smp(\n                det_alt.degree) << u.pW) * np.sqrt(f_smp / one_Hz)\n        if not return_realized_noise:\n            return det_pwr, det_delta_pwr\n        # realize noise\n        rng = np.random.default_rng(seed=random_seed)\n        det_noise = rng.normal(0., det_delta_pwr.to_value(u.pW)) << u.pW\n        # calc the median P and dP for logging purpose\n        med_alt = np.median(det_alt)\n        med_P = self._get_P(med_alt).to(u.pW)\n        med_dP = self._get_dP(med_alt, f_smp).to(u.aW)\n        self.logger.debug(\n            f\"array power loading at med_alt={med_alt} P={med_P} dP={med_dP}\")\n        return det_pwr, det_noise\n\n\nclass ToltecPowerLoadingModel(PowerLoadingModel):\n    \"\"\"\n    A wrapper model to calculate power loading for all the TolTEC arrays.\n\n    This model incorporates both the \"static\" am_qxx models and the toast\n    model.\n    \"\"\"\n\n    logger = get_logger()\n    array_names = toltec_info['array_names']\n\n    n_inputs = 3\n    n_outputs = 1\n\n    def __init__(\n            self, atm_model_name, atm_model_params=None,\n            atm_cache_dir=None):\n        if atm_model_name is None or atm_model_name == 'toast':\n            # this will disable the atm component in the power loading model\n            # but still create one for system efficiency calculation\n            _atm_model_name = None\n        else:\n            _atm_model_name = atm_model_name\n        self._array_power_loading_models = {\n            array_name: ToltecArrayPowerLoadingModel(\n                array_name=array_name,\n                atm_model_name=_atm_model_name)\n            for array_name in self.array_names\n            }\n        if atm_model_name == 'toast':\n            self._toast_atm_evaluator = ToastAtmEvaluator(\n                cache_dir=atm_cache_dir,\n                params=atm_model_params)\n        else:\n            self._toast_atm_evaluator = None\n        super().__init__(name='toltec_power_loading')\n        self.inputs = ('array_name', 'S', 'alt')\n        self.outputs = ('P', )\n        self._atm_model_name = atm_model_name\n\n    @property\n    def atm_model_name(self):\n        return self._atm_model_name\n\n    def evaluate(self):\n        # TODO\n        # implement the default behavior for the model\n        return NotImplemented\n\n    def aplm_eval_interp_context(\n            self, t0, t_grid,\n            sky_bbox_altaz, alt_grid):\n        \"\"\"Context manager that pre-calculates the interp for the array\n        power loading models. This also sets up the toast atm evaluator\n        when one is configured.\n        \"\"\"\n        es = ExitStack()\n        for m in self._array_power_loading_models.values():\n            es.enter_context(m.eval_interp_context(alt_grid))\n        # setup the toast eval context\n        if self._toast_atm_evaluator is not None:\n            es.enter_context(self._toast_atm_evaluator.setup(\n                t0=t0,\n                t_grid=t_grid,\n                sky_bbox_altaz=sky_bbox_altaz,\n                alt_grid=alt_grid,\n                ))\n        return es\n\n    def get_P(self, det_array_name, det_az, det_alt):\n        \"\"\"Evaluate the power loading model only and without noise.\"\"\"\n        p_out = np.zeros(det_alt.shape) << u.pW\n        for array_name in self.array_names:\n            mask = (det_array_name == array_name)\n            aplm = self._array_power_loading_models[array_name]\n            if self.atm_model_name == 'toast':\n                p = self._toast_atm_evaluator.calc_toast_atm_pwr_for_array(\n                    array_name=array_name,\n                    det_az=det_az[mask],\n                    det_alt=det_alt[mask])\n            else:\n                # use the ToltecArrayPowerLoadingModel\n                p, _ = aplm.evaluate_tod(\n                    det_alt[mask], return_realized_noise=False)\n            p_out[mask] = p\n        return p_out\n\n    def sky_sb_to_pwr(self, det_array_name, det_s):\n        p_out = np.zeros(det_s.shape) << u.pW\n        for array_name in self.array_names:\n            mask = (det_array_name == array_name)\n            aplm = self._array_power_loading_models[array_name]\n            # compute the power loading from on-sky surface brightness\n            p_out[mask] = aplm.sky_sb_to_pwr(det_s=det_s[mask])\n        return p_out\n\n    def evaluate_tod(\n            self, det_array_name, det_s, det_az, det_alt,\n
f_smp,\n noise_seed=None,\n ):\n p_out = self.sky_sb_to_pwr(det_array_name, det_s)\n for array_name in self.array_names:\n mask = (det_array_name == array_name)\n aplm = self._array_power_loading_models[array_name]\n if self.atm_model_name is None:\n # atm is disabled\n pass\n elif self.atm_model_name == 'toast':\n p = self._toast_atm_evaluator.calc_toast_atm_pwr_for_array(\n array_name=array_name,\n det_az=det_az[mask],\n det_alt=det_alt[mask])\n p_out[mask] += p\n else:\n # use the ToltecArrayPowerLoadingModel atm\n p, p_noise = aplm.evaluate_tod(\n det_alt=det_alt[mask],\n f_smp=f_smp,\n random_seed=noise_seed,\n return_realized_noise=True,\n )\n p_out[mask] += (p + p_noise)\n return p_out\n\n def __str__(self):\n return (\n f'{self.__class__.__name__}(atm_model_name={self.atm_model_name})')\n\n\n@add_schema\n@dataclass\nclass ToastAtmConfig(object):\n \"\"\"The config class for TOAST atm model.\"\"\"\n lmin_center: u.Quantity = field(\n default=0.01 << u.meter,\n metadata={\n 'description': 'The lmin_center value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n lmin_sigma: u.Quantity = field(\n default=0.001 << u.meter,\n metadata={\n 'description': 'The lmin_sigma value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n lmax_center: u.Quantity = field(\n default=10.0 << u.meter,\n metadata={\n 'description': 'The lmax_center value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n lmax_sigma: u.Quantity = field(\n default=10.0 << u.meter,\n metadata={\n 'description': 'The lmax_sigma value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n z0_center: u.Quantity = field(\n default=2000.0 << u.meter,\n metadata={\n 'description': 'The z0_center value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n z0_sigma: u.Quantity = field(\n default=0.0 << u.meter,\n metadata={\n 'description': 'The z0_sigma value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n zatm: u.Quantity = field(\n default=40000.0 << u.meter,\n metadata={\n 'description': 'The zatm value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n zmax: u.Quantity = field(\n default=2000.0 << u.meter,\n metadata={\n 'description': 'The zmax value',\n 'schema': PhysicalTypeSchema('length')\n }\n )\n\n class Meta:\n schema = {\n 'ignore_extra_keys': False,\n 'description': 'The parameters related to TOAST atm model.'\n }\n\n\nclass ToastAtmEvaluator(object):\n \"\"\"A helper class to work with the Toast Atm model class.\"\"\"\n\n def __init__(self, cache_dir=None, params=None):\n self._cache_dir = cache_dir\n if params is None:\n params = ToastAtmConfig()\n self._params = params\n self._toast_atm_simu = None\n\n @contextmanager\n def setup(self, t0, t_grid, sky_bbox_altaz, alt_grid):\n \"\"\"A context for TOAST atm calculation.\"\"\"\n # initialize the toast atm model\n # create the ToastAtmosphereSimulation instance here with\n # self._params and the sky bbox, and compute the atm slabs\n from . 
import toast_atm\n\n        logger = get_logger()\n        init_kwargs = {\n            't0': t0,\n            'tmin': t0.unix,\n            'tmax': (t0 + t_grid[-1]).unix,\n            'azmin': sky_bbox_altaz.w,\n            'azmax': sky_bbox_altaz.e,\n            'elmin': sky_bbox_altaz.s,\n            'elmax': sky_bbox_altaz.n,\n            'cachedir': self._cache_dir\n            }\n        logger.debug(\n            f\"init toast atm simulation with:\\n{pformat_yaml(init_kwargs)}\"\n            )\n        toast_atm_simu = self._toast_atm_simu = \\\n            toast_atm.ToastAtmosphereSimulation(**init_kwargs)\n        # here we can pass the atm params to toast for generating the slabs\n        setup_params = self._params\n        logger.debug(\n            f\"setup toast atm simulation slabs with params:\\n\"\n            f\"{pformat_yaml(setup_params)}\")\n        toast_atm_simu.generate_simulation(**setup_params.to_dict())\n        yield\n        # clean up the context\n        self._toast_atm_simu = None\n\n    def calc_toast_atm_pwr_for_array(self, array_name, det_az, det_alt):\n        toast_atm_simu = self._toast_atm_simu\n        if toast_atm_simu is None:\n            raise RuntimeError(\n                \"The toast atm simulator is not setup.\")\n        # TODO\n        # implement this to do integral for each det position\n        # at each time for a single array given by array_name\n        raise NotImplementedError(\"toast atm is not implemented yet\")\n",
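The per-bin photon noise in `_T_to_dnep` above is the usual photon NEP budget: a shot term `2 k_B T h f df` plus a wave (Bose) term `2 dP**2 / df`, added per frequency bin and then summed in quadrature over the band in `_get_noise`. A minimal standalone sketch of that arithmetic, using an illustrative flat passband and loading temperature rather than real TolTEC parameters:

```python
# Sketch of the per-bin photon NEP used in _T_to_dnep / _get_noise above.
# The passband grid and loading temperature are illustrative only.
import numpy as np
import astropy.units as u
import astropy.constants as const

f = np.linspace(145., 155., 11) << u.GHz  # assumed flat passband bins
df = f[1] - f[0]                          # constant bin width, as _T_to_dP requires
T = 10. << u.K                            # assumed RJ loading temperature

dP = const.k_B * T * df                   # RJ power per bin (_T_to_dP)
shot = 2. * const.k_B * T * const.h * f * df
wave = 2. * dP ** 2 / df
dnep = np.sqrt(shot + wave)               # per-bin NEP (_T_to_dnep)
nep = np.sqrt(np.sum(dnep ** 2))          # band NEP: quadrature sum, as in _get_noise
print(nep.to(u.aW * u.Hz ** -0.5))
```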
"#! /usr/bin/env python\n\n\n# Author:\n# Zhiyuan Ma\n# History:\n# 2020/04/10 Zhiyuan Ma:\n# - First staged.\n\n\"\"\"This recipe takes a tune file, and use the frequencies of the tones\nto generate a wav file.\n\nAdditional required dependencies:\n\n $ pip install midi2audio MIDIUtil\n\n`Fluidsynth` is required to run midi2audio. On macOS, it can be\ninstalled via homebrew:\n\n $ brew install fluidsynth\n\nAt least one sound font needs to be present to be able to render\nthe MIDI, which can be found on\nhttps://github.com/FluidSynth/fluidsynth/wiki/SoundFont\n\nTo install the sound font, follow the doc of `midi2audio` at\n\nhttps://github.com/bzamecnik/midi2audio\n\nTo play the sounds in-line, one also need to install:\n\n pip install playsound\n # on macOS, also do:\n pip install pyobjc\n\n\"\"\"\nfrom tolteca.io.toltec import KidsModelParams\nfrom tollan.utils.log import get_logger, init_log\nimport numpy as np\nfrom midiutil import MIDIFile\nfrom midi2audio import FluidSynth\nimport tempfile\n\n\ndef main(tunefile, wavfile=None):\n\n logger = get_logger()\n tune = KidsModelParams(tunefile)\n logger.debug(f'tune model: {tune.model}')\n\n f_a4 = 440\n # a #a b c #c d #d e f #f g #g\n # 0 1 2 3 4 5 6 7 8 9 10 11\n # A B C D E F G A\n natural_am_midi = np.arange(57, 57 + 13)\n natural_am_scale = (2 ** np.linspace(0, 1, 13)) * f_a4\n\n # c d e g a\n # am_to_penta_c_maj\n trans = [3, 5, 7, 10, 12]\n\n # d e g a b\n # am_to_yo\n # trans = [5, 7, 10, 12, 2]\n\n midi_scale = natural_am_midi[trans]\n scale = natural_am_scale[trans]\n\n logger.debug(f'midi scale: {midi_scale}')\n logger.debug(f'scale: {scale}')\n\n notes = (np.tile(\n tune.model.fr.value, (len(scale), 1)).T % scale).T\n notes = np.argmin(notes, axis=0)\n logger.debug(f'notes:\\n{notes}')\n\n midi_notes = midi_scale[notes]\n # make midi\n track = 0\n channel = 0\n time = 0 # In beats\n duration = 0.5 # In beats\n tempo = 120 # In BPM\n volume = 100 # 0-127, as per the MIDI standard\n\n # One track, defaults to format 1 (tempo track\n # automatically created)\n MyMIDI = MIDIFile(1)\n MyMIDI.addTempo(track, time, tempo)\n\n for note in midi_notes:\n MyMIDI.addNote(track, channel, note, time, duration, volume)\n time = time + 0.25\n\n with tempfile.NamedTemporaryFile() as f:\n MyMIDI.writeFile(f)\n\n def to_wav(wavfile):\n FluidSynth().midi_to_audio(f.name, wavfile)\n\n if wavfile is None:\n from playsound import playsound\n with tempfile.NamedTemporaryFile() as g:\n to_wav(g.name)\n playsound(g.name)\n else:\n to_wav(wavfile)\n\n\nif __name__ == \"__main__\":\n init_log(level='DEBUG')\n\n import argparse\n parser = argparse.ArgumentParser(\n description='Generate a tune from tune file.')\n parser.add_argument(\"tunefile\", help='The tune file to use.')\n parser.add_argument(\n \"--output\", '-o', help='output file. If not set, play.')\n args = parser.parse_args()\n main(args.tunefile, wavfile=args.output)\n",
"\nimport numpy as np\nimport datetime\nimport astropy.units as u\nimport astropy.constants as const\n\nfrom tollan.utils.log import timeit, get_logger\nimport toast\n\nfrom . import get_default_passbands\nfrom .lmt import lmt_info\n\n# load the atmosphere tools\ntry:\n from toast.atm import (\n atm_absorption_coefficient_vec, \n atm_atmospheric_loading_vec\n )\n have_atm_utils = True\nexcept ImportError as err:\n have_atm_utils = False\n raise err\n\n__all__ = ['ToastAtmosphereSimulation']\n\nclass ToastAtmosphereSimulation(object):\n \"\"\" toast Atmosphere Slabs \n TODO: kwargs the inputs to toast.atm.AtmSim\n \"\"\"\n def __init__(self, t0, tmin, tmax, azmin, azmax, elmin, elmax, cachedir=None):\n self.t0 = t0\n self.tmin = tmin\n self.tmax = tmax\n self.azmin = azmin\n self.azmax = azmax\n self.elmin = elmin\n self.elmax = elmax\n self.cachedir = cachedir\n\n self.site_height = u.Quantity(lmt_info['location']['height'])\n\n\n @timeit\n def generate_simulation(self):\n \"\"\" Generates with parameters\n \"\"\"\n \n self.atm_slabs = self._generate_toast_atm_slabs(\n self.t0, self.tmin, self.tmax, \n self.azmin, self.azmax, \n self.elmin, self.elmax\n )\n\n self._bandpass_calculations()\n\n @staticmethod\n def spectrum_convolution(freqs, spectrum, throughput):\n \"\"\" Convolve the provided spectrum with the detector bandpass\n Args:\n freqs(array of floats): Spectral bin locations\n spectrum(array of floats): Spectral bin values\n throughput(array of floats): throughput of the bandpass\n Returns:\n (array): The bandpass-convolved spectrum\n \"\"\"\n # interpolation not needed since we use the same array\n freqs = freqs.to_value(u.GHz)\n\n # norm the bandpass\n norm = toast._libtoast.integrate_simpson(freqs, throughput)\n throughput /= norm\n\n # convolved the data\n convolved = toast._libtoast.integrate_simpson(\n freqs, spectrum * throughput\n )\n return convolved\n\n def _absorption_coefficient(self, bandpass):\n absorption = atm_absorption_coefficient_vec(\n self.site_height.to_value(u.meter),\n self.sim_weather.air_temperature.to_value(u.Kelvin),\n self.sim_weather.surface_pressure.to_value(u.Pa),\n self.sim_weather.pwv.to_value(u.mm),\n bandpass[0].to_value(u.GHz),\n bandpass[-1].to_value(u.GHz),\n len(bandpass),\n ) \n return absorption\n\n def _atmospheric_loading(self, bandpass):\n loading = atm_atmospheric_loading_vec(\n self.site_height.to_value(u.meter),\n self.sim_weather.air_temperature.to_value(u.Kelvin),\n self.sim_weather.surface_pressure.to_value(u.Pa),\n self.sim_weather.pwv.to_value(u.mm),\n bandpass[0].to_value(u.GHz),\n bandpass[-1].to_value(u.GHz),\n len(bandpass),\n )\n return loading\n\n @timeit\n def _bandpass_calculations(self):\n \n self.absorption = dict()\n self.loading = dict()\n\n pb = get_default_passbands()\n for band in ['a1100', 'a1400', 'a2000']:\n\n # get the bandpass/throughput\n bandpass_freqs = np.array(pb[band]['f'][:]) * u.GHz\n bandpass_throughput = np.array(pb[band]['throughput'])\n \n # calculate the absorption/loading\n absorption = self._absorption_coefficient(bandpass_freqs)\n loading= self._atmospheric_loading(bandpass_freqs)\n\n # calculate the convolution\n absorption_det = self.spectrum_convolution(bandpass_freqs, absorption, bandpass_throughput)\n loading_det = self.spectrum_convolution(bandpass_freqs, loading, bandpass_throughput)\n\n # store it for later use\n self.absorption[band] = absorption_det\n self.loading[band]= loading_det\n\n def _generate_toast_atm_slabs(self, t0, tmin, tmax, azmin, azmax, elmin, elmax, mpi_comm=None):\n 
\"\"\"Creates the atmosphere models using multiple slabs\n Currently, only the parameters that define the time ranges, azimuth ranges, \n elevation ranges are exposed (by necessity)\n \"\"\"\n\n # Starting slab parameters (thank you Ted)\n rmin = 0 * u.meter\n rmax = 100 * u.meter\n scale = 10.0\n xstep = 5 * u.meter\n ystep = 5 * u.meter\n zstep = 5 * u.meter\n\n # RNG state\n key1 = 0\n key2 = 0\n counter1 = 0\n counter2 = 0\n\n # obtain the weather information\n # TODO: separate out the weather to its own method\n self.sim_weather = toast.weather.SimWeather(\n time = t0.to_datetime(timezone=datetime.timezone.utc),\n name=\"LMT\", median_weather=True\n )\n self.T0_center = self.sim_weather.air_temperature\n self.wx = self.sim_weather.west_wind\n self.wy = self.sim_weather.south_wind\n w_center = np.sqrt(self.wx ** 2 + self.wy ** 2)\n wdir_center = np.arctan2(self.wy, self.wx)\n\n # dict of atmosphere slabs\n atm_slabs = dict()\n\n # generate slabs until rmax > 100000 meters\n # TODO: eventually expose these\n while rmax < (100000 * u.meter):\n slab_id = f'{key1}_{key2}_{counter1}_{counter2}'\n toast_atmsim_model = toast.atm.AtmSim(\n azmin=azmin, azmax=azmax,\n elmin=elmin, elmax=elmax,\n tmin=tmin, tmax=tmax,\n lmin_center=0.01 * u.meter,\n lmin_sigma=0.001 * u.meter,\n lmax_center=10.0 * u.meter,\n lmax_sigma =10.0 * u.meter,\n w_center=w_center,\n w_sigma=0 * (u.km / u.second),\n wdir_center=wdir_center,\n wdir_sigma=0 * u.radian,\n z0_center=2000 * u.meter,\n z0_sigma=0 * u.meter,\n T0_center=self.T0_center,\n T0_sigma=10 * u.Kelvin,\n zatm=40000.0 * u.meter,\n zmax=2000.0 * u.meter,\n xstep=xstep,\n ystep=ystep,\n zstep=zstep,\n nelem_sim_max=20000,\n comm=mpi_comm,\n key1=key1,\n key2=key2,\n counterval1=counter1,\n counterval2=counter2,\n cachedir=self.cachedir, # TODO: add a cachedir in the working folder\n rmin=rmin,\n rmax=rmax,\n node_comm=None,\n node_rank_comm=None\n )\n \n # simulate the atmosphere\n use_cache = False\n if self.cachedir is not None:\n use_cache = True\n err = toast_atmsim_model.simulate(use_cache=use_cache)\n if err != 0:\n raise RuntimeError(\"toast atmosphere generation failed\")\n \n # include in stack\n atm_slabs[slab_id] = toast_atmsim_model\n \n # use a new RNG stream for each slab\n counter1 += 1\n\n # decrease resolution as we increase altitude\n rmin = u.Quantity(rmax)\n rmax *= scale\n xstep *= np.sqrt(scale)\n ystep *= np.sqrt(scale)\n zstep *= np.sqrt(scale)\n\n return atm_slabs\n",
"#!/usr/bin/env python\n\nfrom .models import (\n ToltecArrayProjModel, ToltecSkyProjModel, pa_from_coords,\n ToltecPowerLoadingModel)\nfrom .toltec_info import toltec_info\nfrom ..utils import PersistentState, SkyBoundingBox, make_time_grid\nfrom ..mapping import (PatternKind, LmtTcsTrajMappingModel)\nfrom ..mapping.utils import resolve_sky_coords_frame\nfrom ..sources.base import (SurfaceBrightnessModel, )\nfrom ..sources.models import (ImageSourceModel, CatalogSourceModel)\n\nfrom ...utils.common_schema import PhysicalTypeSchema\nfrom tollan.utils.nc import NcNodeMapper\nfrom tollan.utils.log import get_logger, timeit\nfrom tollan.utils.fmt import pformat_yaml\nfrom tollan.utils.dataclass_schema import add_schema\nfrom kidsproc.kidsmodel.simulator import KidsSimulator\nfrom kidsproc.kidsmodel import ReadoutGainWithLinTrend\n\nfrom scipy.interpolate import interp1d\nimport netCDF4\nimport astropy.units as u\nfrom astropy.table import Column, QTable\nimport numpy as np\nfrom contextlib import ExitStack, contextmanager\nfrom datetime import datetime\nimport shutil\nfrom dataclasses import dataclass, field\nfrom astropy.coordinates.erfa_astrom import (\n erfa_astrom, ErfaAstromInterpolator)\nfrom astropy.coordinates import Angle, Longitude, Latitude # , AltAz, SkyCoord\n\n\n__all__ = ['ToltecObsSimulator', 'ToltecHwpConfig']\n\n\n@add_schema\n@dataclass\nclass ToltecHwpConfig(object):\n \"\"\"The config class for TolTEC half-wave plate and the rotator.\"\"\"\n\n f_rot: u.Quantity = field(\n default=4. << u.Hz,\n metadata={\n 'description': 'The rotator frequency',\n 'schema': PhysicalTypeSchema(\"frequency\"),\n }\n )\n f_smp: u.Quantity = field(\n default=20. << u.Hz,\n metadata={\n 'description': 'The sampling frequency '\n 'of the position angle.',\n 'schema': PhysicalTypeSchema(\"frequency\"),\n }\n )\n rotator_enabled: bool = field(\n default=False,\n metadata={\n 'description': 'True if use HWPR.'\n }\n )\n\n class Meta:\n schema = {\n 'ignore_extra_keys': False,\n 'description': 'The parameters related to HWP.'\n }\n\n\nclass ToltecObsSimulator(object):\n\n logger = get_logger()\n\n info = toltec_info\n site_info = info['site']\n observer = ToltecSkyProjModel.observer\n _m_array_proj = ToltecArrayProjModel()\n _m_sky_proj_cls = ToltecSkyProjModel\n _kids_readout_model_cls = ReadoutGainWithLinTrend\n\n def __init__(self, array_prop_table, polarized=False, hwp_config=None):\n\n apt = self._array_prop_table = self._prepare_array_prop_table(\n array_prop_table)\n self._polarized = polarized\n if hwp_config is None:\n hwp_config = ToltecHwpConfig()\n self._hwp_config = hwp_config\n # create low level models\n self._kids_simulator = KidsSimulator(\n fr=apt['fr'],\n Qr=apt['Qr'],\n background=apt['background'],\n responsivity=apt['responsivity']\n )\n self._kids_readout_model = self._kids_readout_model_cls(\n n_models=len(apt),\n **{\n c: apt[c]\n for c in self._kids_readout_model_cls.param_names\n }\n )\n self.logger.debug(f\"kids_simulator: {self.kids_simulator}\")\n self.logger.debug(f\"kids_readout_model: {self.kids_readout_model}\")\n\n @property\n def array_prop_table(self):\n \"\"\"The table containing all detector properties.\"\"\"\n return self._array_prop_table\n\n @property\n def array_names(self):\n return self.array_prop_table.meta['array_names']\n\n @property\n def polarized(self):\n \"\"\"True if to simulate polarized signal.\"\"\"\n return self._polarized\n\n @property\n def hwp_config(self):\n return self._hwp_config\n\n @property\n def kids_simulator(self):\n \"\"\"The 
KIDs signal simulator to convert optical loading to\n KIDs timestream (I, Q).\"\"\"\n return self._kids_simulator\n\n @property\n def kids_readout_model(self):\n \"\"\"The model to simulate specialties of the KIDs data readout system.\n \"\"\"\n return self._kids_readout_model\n\n # these are some fiducial kids model params\n _default_kids_props = {\n 'fp': 'f', # column name of apt if string\n 'fr': 'f',\n 'Qr': 1e4,\n 'g0': 200,\n 'g1': 0,\n 'g': 200,\n 'phi_g': 0,\n 'f0': 'f',\n 'k0': 0 / u.Hz,\n 'k1': 0 / u.Hz,\n 'm0': 0,\n 'm1': 0\n }\n\n @classmethod\n def _prepare_array_prop_table(cls, array_prop_table):\n \"\"\"This function populates the `array_prop_table` with sensible\n defaults required to run the simulator\"\"\"\n tbl = array_prop_table.copy()\n # note that the apt passed to the function maybe a small portion\n # (both of row-wise and column-wise) of the full array_prop_table\n # of the TolTEC instrument. We check the column for the available\n # group names\n array_names = tbl.meta['array_names'] = np.unique(\n tbl['array_name']).tolist()\n # array props\n ap_to_cn_map = {\n 'wl_center': 'wl_center',\n 'a_fwhm': 'a_fwhm',\n 'b_fwhm': 'b_fwhm',\n 'background': 'background',\n 'bkg_temp': 'bkg_temp',\n 'responsivity': 'responsivity',\n 'passband': 'passband',\n }\n for array_name in array_names:\n m = tbl['array_name'] == array_name\n props = {\n c: toltec_info[array_name][k]\n for k, c in ap_to_cn_map.items()\n }\n for c in props.keys():\n if c not in tbl.colnames:\n tbl.add_column(Column(\n np.empty((len(tbl), ), dtype='d'),\n name=c, unit=props[c].unit))\n tbl[c][m] = props[c]\n\n # kids props\n for c, v in cls._default_kids_props.items():\n if c in tbl.colnames:\n continue\n cls.logger.debug(f\"create kids prop column {c}\")\n if isinstance(v, str) and v in tbl.colnames:\n tbl[c] = tbl[v]\n continue\n if isinstance(v, u.Quantity):\n value = v.value\n unit = v.unit\n else:\n value = v\n unit = None\n if np.isscalar(value):\n tbl.add_column(\n Column(np.full((len(tbl),), value), name=c, unit=unit))\n else:\n raise ValueError('invalid kids prop')\n\n # calibration related\n # TODO need to revisit these assumptions\n if 'flxscale' not in tbl.colnames:\n tbl['flxscale'] = (1. 
/ tbl['responsivity']).quantity.value\n\n # kids readout noise\n if 'sigma_readout' not in tbl.colnames:\n tbl['sigma_readout'] = 10.\n\n # detector locations in toltec frame\n if not {'x_t', 'y_t', 'pa_t'}.issubset(tbl.colnames):\n x_t, y_t, pa_t = cls._m_array_proj(\n tbl['x'].quantity,\n tbl['y'].quantity,\n tbl['array'], tbl['fg']\n )\n if not {\"x_t\", \"y_t\"}.issubset(tbl.colnames):\n tbl['x_t'] = x_t\n tbl['y_t'] = y_t\n if 'pa_t' not in tbl.colnames:\n tbl['pa_t'] = pa_t\n return QTable(tbl)\n\n def __str__(self):\n return (\n f'{self.__class__.__name__}('\n f'n_detectors={len(self.array_prop_table)}, '\n f'polarized={self.polarized})'\n )\n\n def output_context(self, dirpath):\n return ToltecSimuOutputContext(simulator=self, rootpath=dirpath)\n\n @timeit\n def _get_detector_sky_traj(\n self,\n time_obs,\n bs_coords_altaz,\n bs_coords_icrs,\n evaluate_interp_len=None):\n \"\"\"Return the detector positions of shape [n_detectors, n_times]\n on sky.\n \"\"\"\n logger = get_logger()\n if evaluate_interp_len is None:\n apt = self.array_prop_table\n x_t = apt['x_t']\n y_t = apt['y_t']\n pa_t = apt['pa_t']\n\n logger.debug(\n f'get {len(apt)} detector sky trajectories for '\n f'{len(time_obs)} time steps')\n\n m_sky_proj = self._m_sky_proj_cls(\n origin_coords_icrs=bs_coords_icrs,\n origin_coords_altaz=bs_coords_altaz)\n # this will do the altaz and icrs eval and save all\n # intermediate objects in the eval_ctx dict\n _, eval_ctx = m_sky_proj(\n x_t[np.newaxis, :],\n y_t[np.newaxis, :],\n pa_t[np.newaxis, :],\n evaluate_frame='icrs',\n use_evaluate_icrs_fast=False,\n return_eval_context=True\n )\n # unpack the eval_ctx\n # note that the detector id is dim0 and time_obs is dim1\n det_sky_traj = dict()\n det_sky_traj['az'] = eval_ctx['coords_altaz'].az\n det_sky_traj['alt'] = eval_ctx['coords_altaz'].alt\n det_sky_traj['pa_altaz'] = eval_ctx['pa_altaz']\n det_sky_traj['ra'] = eval_ctx['coords_icrs'].ra\n det_sky_traj['dec'] = eval_ctx['coords_icrs'].dec\n det_sky_traj['pa_icrs'] = eval_ctx['pa_icrs']\n # dpa_altaz_icrs = eval_ctx['dpa_altaz_icrs']\n return det_sky_traj\n\n # make a subset of parameters for faster evaluate\n # we need to make sure mjd_obs is sorted before hand\n logger.debug(\n f\"evaluate sky_proj_model with \"\n f\"evaluate_interp_len={evaluate_interp_len}\")\n mjd = time_obs.mjd << u.day\n if not np.all(np.diff(mjd) >= 0):\n raise ValueError('time_obs has to be sorted ascending.')\n # collect the subsample index\n s = [0]\n for i, t in enumerate(mjd):\n if t - mjd[s[-1]] < evaluate_interp_len:\n continue\n s.append(i)\n # ensure the last index is in the subsample\n if s[-1] != len(mjd) - 1:\n s.append(-1)\n logger.debug(\n f\"prepare sky_proj_model for {len(s)}/{len(mjd)} time steps\")\n time_obs_s = time_obs[s]\n bs_coords_altaz_s = bs_coords_altaz[s]\n bs_coords_icrs_s = bs_coords_icrs[s]\n # evaluate with the subsample data\n det_sky_traj_s = self._get_detector_sky_traj(\n time_obs=time_obs_s,\n bs_coords_altaz=bs_coords_altaz_s,\n bs_coords_icrs=bs_coords_icrs_s,\n evaluate_interp_len=None\n )\n # now build the interp along the time dim.\n mjd_day_s = mjd[s].to_value(u.day)\n interp_kwargs = dict(kind='linear', axis=1)\n az_deg_interp = interp1d(\n mjd_day_s,\n det_sky_traj_s['az'].degree, **interp_kwargs)\n alt_deg_interp = interp1d(\n mjd_day_s,\n det_sky_traj_s['alt'].degree, **interp_kwargs)\n pa_altaz_deg_interp = interp1d(\n mjd_day_s,\n det_sky_traj_s['pa_altaz'].to_value(u.deg), **interp_kwargs)\n\n ra_deg_interp = interp1d(\n mjd_day_s,\n 
det_sky_traj_s['ra'].degree, **interp_kwargs)\n dec_deg_interp = interp1d(\n mjd_day_s,\n det_sky_traj_s['dec'].degree, **interp_kwargs)\n pa_icrs_deg_interp = interp1d(\n mjd_day_s,\n det_sky_traj_s['pa_icrs'].to_value(u.deg), **interp_kwargs)\n # interp for full time steps\n mjd_day = mjd.to_value(u.day)\n det_sky_traj = dict()\n det_sky_traj['az'] = Longitude(az_deg_interp(mjd_day) << u.deg)\n det_sky_traj['alt'] = Latitude(alt_deg_interp(mjd_day) << u.deg)\n det_sky_traj['pa_altaz'] = Angle(pa_altaz_deg_interp(mjd_day) << u.deg)\n det_sky_traj['ra'] = Longitude(ra_deg_interp(mjd_day) << u.deg)\n det_sky_traj['dec'] = Latitude(dec_deg_interp(mjd_day) << u.deg)\n det_sky_traj['pa_icrs'] = Angle(pa_icrs_deg_interp(mjd_day) << u.deg)\n return det_sky_traj\n\n def probing_evaluator(\n self,\n f_smp,\n kids_p_tune,\n kids_fp=None,\n power_loading_model=None):\n \"\"\"Return a function that can be used to calculate detector readout.\n\n When `power_loading_model` is given, the detector signal will be the\n sum of the contribution from the astronomical source and the power\n loading model which includes the telescope and atmosphere::\n\n P_det = P_src + P_bkg_fixture + P_atm(alt)\n\n We set the tune of the KidsSimulator,such that x=0 at P=kids_p_tune,\n where kids_p_tune is typically calculated with the loading model\n at some altitude P_bkg_fixture + P_atm(alt_tune)\n\n Thus the measured detuning parameters is proportional to\n\n P_src + (P_atm(alt) - P_atm(alt_tune))\n \"\"\"\n\n apt = self.array_prop_table\n det_array_name = apt['array_name']\n kids_simu = self.kids_simulator.copy()\n kids_simu._background = kids_p_tune\n kids_readout_model = self.kids_readout_model\n if kids_fp is None:\n kids_fp = kids_simu.fr\n if power_loading_model is not None:\n # make sure this is an instance of the toltec power loading model\n if not isinstance(power_loading_model, ToltecPowerLoadingModel):\n raise ValueError(\"invalid power loading model.\")\n\n def kids_probe_p(det_p):\n nonlocal kids_simu\n return kids_simu.probe_p(\n det_p,\n fp=kids_fp, readout_model=kids_readout_model)\n\n def sky_sb_to_pwr(det_s):\n if power_loading_model is None:\n # in this case we just convert the detector surface brightness\n # to power with simple square passband.\n # convert det sb to pwr loading\n return (\n det_s.to(\n u.K,\n equivalencies=u.brightness_temperature(\n apt['wl_center'][:, np.newaxis])).to(\n u.J,\n equivalencies=u.temperature_energy())\n * apt['passband'][:, np.newaxis]\n ).to(u.pW)\n return power_loading_model.sky_sb_to_pwr(\n det_array_name=det_array_name,\n det_s=det_s,\n )\n\n def evaluate(det_s=None, det_sky_traj=None):\n # make sure we have at least some input for eval\n if det_s is None and det_sky_traj is None:\n raise ValueError(\"one of det_s and det_sky_traj is required\")\n if det_s is not None and det_sky_traj is not None:\n if det_s.shape != det_sky_traj['alt'].shape:\n raise ValueError(\n \"mismatch shape in det_s and det_sky_traj\")\n if det_s is not None:\n data_shape = det_s.shape\n else:\n data_shape = det_sky_traj['alt'].shape\n # make sure the data shape matches with apt shape\n if data_shape[0] != len(det_array_name):\n raise ValueError(\n \"mismatch shape in data shape and apt length.\")\n if det_s is None:\n det_s = np.zeros(data_shape, dtype='d') << u.MJy / u.sr\n if power_loading_model is None:\n # in this case we just convert the detector surface brightness\n # to power with simple square passband.\n # convert det sb to pwr loading\n self.logger.debug(\n \"calculate power 
loading without loading model\")\n det_pwr = sky_sb_to_pwr(det_s)\n else:\n if det_sky_traj is None:\n raise ValueError(\n \"Power loading model requires det_sky_traj\")\n with timeit(\n f\"calculate power loading with loading model \"\n f\"{power_loading_model}\"):\n det_pwr = power_loading_model.evaluate_tod(\n det_array_name=det_array_name,\n det_s=det_s,\n det_az=det_sky_traj['az'],\n det_alt=det_sky_traj['alt'],\n f_smp=f_smp,\n noise_seed=None,\n )\n self.logger.info(\n f\"power loading at detector: \"\n f\"min={det_pwr.min()} max={det_pwr.max()}\")\n # calculate the kids signal\n nonlocal kids_probe_p\n rs, xs, iqs = kids_probe_p(det_pwr)\n return det_pwr, locals()\n return evaluate, locals()\n\n def mapping_evaluator(\n self, mapping, sources=None,\n erfa_interp_len=300. << u.s,\n eval_interp_len=0.1 << u.s,\n catalog_model_render_pixel_size=0.5 << u.arcsec):\n if sources is None:\n sources = list()\n t0 = mapping.t0\n apt = self.array_prop_table\n\n hwp_cfg = self.hwp_config\n\n def get_hwp_pa_t(t):\n # return the hwp position angle at time t\n return Angle(((hwp_cfg.f_rot * t).to_value(\n u.dimensionless_unscaled) * 2. * np.pi) << u.rad)\n\n def evaluate(t, mapping_only=False):\n time_obs = t0 + t\n n_times = len(time_obs)\n self.logger.debug(\n f\"evalute time_obs from {time_obs[0]} to \"\n f\"{time_obs[-1]} n_times={n_times}\")\n # TODO add more control for the hwp position\n hwp_pa_t = get_hwp_pa_t(t)\n # if True:\n with erfa_astrom.set(ErfaAstromInterpolator(erfa_interp_len)):\n with timeit(\"transform bore sight coords\"):\n # get bore sight trajectory and the hold flags\n holdflag = mapping.evaluate_holdflag(t)\n bs_coords = mapping.evaluate_coords(t)\n bs_coords_icrs = bs_coords.transform_to('icrs')\n # for the altaz we need to resolve the frame for the\n # toltec observer and time\n bs_coords_altaz = bs_coords.transform_to(\n resolve_sky_coords_frame(\n 'altaz',\n observer=mapping.observer,\n time_obs=time_obs\n ))\n bs_parallactic_angle = pa_from_coords(\n observer=mapping.observer,\n coords_altaz=bs_coords_altaz,\n coords_icrs=bs_coords_icrs)\n target_altaz = mapping.target.transform_to(\n bs_coords_altaz.frame)\n hwp_pa_altaz = hwp_pa_t + bs_coords_altaz.alt\n hwp_pa_icrs = hwp_pa_altaz + bs_parallactic_angle\n bs_sky_bbox_icrs = SkyBoundingBox.from_lonlat(\n bs_coords_icrs.ra, bs_coords_icrs.dec)\n bs_sky_bbox_altaz = SkyBoundingBox.from_lonlat(\n bs_coords_altaz.az, bs_coords_altaz.alt)\n self.logger.debug(\n f\"sky_bbox icrs={bs_sky_bbox_icrs} \"\n f\"altaz={bs_sky_bbox_altaz}\")\n # make the model to project detector positions\n det_sky_traj = self._get_detector_sky_traj(\n time_obs=time_obs,\n bs_coords_altaz=bs_coords_altaz,\n bs_coords_icrs=bs_coords_icrs,\n evaluate_interp_len=eval_interp_len)\n det_ra = det_sky_traj['ra']\n det_dec = det_sky_traj['dec']\n det_pa_icrs = det_sky_traj['pa_icrs']\n det_sky_bbox_icrs = SkyBoundingBox.from_lonlat(\n det_ra, det_dec)\n det_sky_bbox_altaz = SkyBoundingBox.from_lonlat(\n det_sky_traj['az'], det_sky_traj['alt'])\n self.logger.info(\n f\"det sky traj bbox:\\n\"\n f\"ra: {det_sky_bbox_icrs.w!s} - {det_sky_bbox_icrs.e!s}\\n\"\n f\"dec: {det_sky_bbox_icrs.s!s} - {det_sky_bbox_icrs.n!s}\\n\"\n f\"az: {det_sky_bbox_altaz.w!s} \"\n f\"- {det_sky_bbox_altaz.e!s}\\n\"\n f\"alt: {det_sky_bbox_altaz.s!s} \"\n f\"- {det_sky_bbox_altaz.n!s}\\n\"\n f\"size: {det_sky_bbox_icrs.width}, \"\n f\"{det_sky_bbox_icrs.height}\"\n )\n\n # import matplotlib.pyplot as plt\n # fig, ax = plt.subplots(1, 1)\n # for i in range(400):\n # for j in 
range(0, det_ra.shape[1], 10):\n # plt.plot(\n # [det_ra.degree[i, j]],\n # [det_dec.degree[i, j]],\n # marker=(2, 0, det_pa_icrs.degree[i, j]),\n # markersize=5, linestyle=None)\n # plt.show()\n if mapping_only:\n return locals()\n # get source flux from models\n s_additive = list()\n for m_source in sources:\n # TODO maybe there is a faster way of handling the\n # catalog source model directly. For now\n # we just convert it to image model\n if isinstance(m_source, CatalogSourceModel):\n # get fwhms from toltec_info\n fwhms = dict()\n for array_name in self.array_names:\n fwhms[array_name] = toltec_info[\n array_name]['a_fwhm']\n m_source = m_source.make_image_model(\n fwhms=fwhms,\n pixscale=catalog_model_render_pixel_size / u.pix\n )\n if isinstance(m_source, ImageSourceModel):\n # TODO support more types of wcs. For now\n # only ICRS is supported\n with timeit(\n \"extract flux from source image model\"):\n # we only pass the pa and hwp when we want\n # them to be eval for polarimetry\n source_eval_kw = dict()\n if self.polarized:\n source_eval_kw['det_pa_icrs'] = det_pa_icrs\n if hwp_cfg.rotator_enabled:\n source_eval_kw['hwp_pa_icrs'] = hwp_pa_icrs\n s = m_source.evaluate_tod_icrs(\n apt['array_name'],\n det_ra,\n det_dec,\n **source_eval_kw\n )\n s_additive.append(s)\n if len(s_additive) <= 0:\n self.logger.debug(\"no surface brightness model available\")\n s = np.zeros(det_ra.shape) << u.MJy / u.sr\n else:\n s = s_additive[0]\n for _s in s_additive[1:]:\n s += _s\n self.logger.info(\n f\"source surface brightness at detector: \"\n f\"min={s.min()} max={s.max()}\")\n return s, locals()\n return evaluate, locals()\n\n @contextmanager\n def iter_eval_context(self, simu_config):\n \"\"\"Run the simuation defined by `simu_config`.\"\"\"\n mapping_model = simu_config.mapping_model\n source_models = simu_config.source_models\n obs_params = simu_config.obs_params\n perf_params = simu_config.perf_params\n t_simu = simu_config.t_simu\n\n # split the sources based on their base class\n # we need to make sure the TolTEC power loading source is only\n # specified once\n sources_sb = list()\n power_loading_model = None\n sources_unknown = list()\n for s in source_models:\n if isinstance(s, SurfaceBrightnessModel):\n sources_sb.append(s)\n elif isinstance(s, ToltecPowerLoadingModel):\n if power_loading_model is not None:\n raise ValueError(\n \"multiple TolTEC power loading model found.\")\n power_loading_model = s\n else:\n sources_unknown.append(s)\n self.logger.debug(f\"surface brightness sources:\\n{sources_sb}\")\n self.logger.debug(f\"power loading model:\\n{power_loading_model}\")\n self.logger.warning(f\"ignored sources:\\n{sources_unknown}\")\n\n # create the time grids\n # this is the iterative eval time grids\n t_chunks = make_time_grid(\n t=t_simu,\n f_smp=obs_params.f_smp_probing,\n chunk_len=perf_params.chunk_len)\n # this is used for doing pre-eval calcuation.\n t_grid_pre_eval = np.linspace(\n 0, t_simu.to_value(u.s),\n perf_params.pre_eval_t_grid_size\n ) << u.s\n\n # create the mapping evaluator first so that we can get full\n # sky bbox for the observation\n # the altitude is needed to create the probing evaluator\n mapping_evaluator, mapping_eval_ctx = self.mapping_evaluator(\n mapping=mapping_model, sources=sources_sb,\n erfa_interp_len=perf_params.mapping_erfa_interp_len,\n eval_interp_len=perf_params.mapping_eval_interp_len,\n catalog_model_render_pixel_size=(\n perf_params.catalog_model_render_pixel_size),\n )\n # this context es is to hold any contexts during the 
iterative\n        # eval\n        es = ExitStack()\n        # we run the mapping eval to get the det_sky_traj for the entire\n        # simu\n        mapping_info = mapping_evaluator(\n            t_grid_pre_eval, mapping_only=True)\n        # compute the extent for detectors\n        bbox_padding = (\n            perf_params.pre_eval_sky_bbox_padding_size,\n            perf_params.pre_eval_sky_bbox_padding_size,\n            )\n        # here we add some padding to the bbox\n        det_sky_traj = mapping_info['det_sky_traj']\n        det_sky_bbox_icrs = SkyBoundingBox.from_lonlat(\n            det_sky_traj['ra'], det_sky_traj['dec']).pad_with(\n                *bbox_padding)\n        det_sky_bbox_altaz = SkyBoundingBox.from_lonlat(\n            det_sky_traj['az'], det_sky_traj['alt']).pad_with(\n                *bbox_padding)\n        self.logger.info(\n            f\"pre-eval sky bbox:\\n\"\n            f\"ra: {det_sky_bbox_icrs.w!s} - {det_sky_bbox_icrs.e!s}\\n\"\n            f\"dec: {det_sky_bbox_icrs.s!s} - {det_sky_bbox_icrs.n!s}\\n\"\n            f\"az: {det_sky_bbox_altaz.w!s} - {det_sky_bbox_altaz.e!s}\\n\"\n            f\"alt: {det_sky_bbox_altaz.s!s} - {det_sky_bbox_altaz.n!s}\\n\"\n            f\"size: {det_sky_bbox_icrs.width}, {det_sky_bbox_icrs.height}\"\n            )\n        # this apt will be modified and saved in the eval context\n        apt = self.array_prop_table.copy()\n        det_array_name = apt['array_name']\n        # setup interp for power loading model\n        if power_loading_model is not None:\n            alt_deg_step = perf_params.aplm_eval_interp_alt_step.to_value(\n                u.deg)\n            interp_alt_grid = np.arange(\n                det_sky_bbox_altaz.s.degree,\n                det_sky_bbox_altaz.n.degree + alt_deg_step,\n                alt_deg_step,\n                ) << u.deg\n            if len(interp_alt_grid) < 5:\n                raise ValueError('aplm_eval_interp_alt_step too small.')\n            # this also sets up the toast atm slabs when atm_model_name\n            # is 'toast', via the toast evaluator setup entered in\n            # aplm_eval_interp_context.\n            es.enter_context(\n                power_loading_model.aplm_eval_interp_context(\n                    t0=mapping_model.t0,\n                    t_grid=t_grid_pre_eval,\n                    sky_bbox_altaz=det_sky_bbox_altaz,\n                    alt_grid=interp_alt_grid,\n                    ))\n        # figure out the tune power and flxscale\n        # we use the closest point on the boresight to the target\n        # for the tune obs\n        bs_coords_icrs = mapping_info['bs_coords_icrs']\n        target_icrs = mapping_model.target.transform_to('icrs')\n        i_closest = np.argmin(\n            target_icrs.separation(bs_coords_icrs))\n        det_az_tune = det_sky_traj['az'][:, i_closest]\n        det_alt_tune = det_sky_traj['alt'][:, i_closest]\n\n        self.logger.debug(f\"use tune at detector alt={det_alt_tune.mean()}\")\n        if power_loading_model is None:\n            # when power loading model is not set, we use the apt default\n            kids_p_tune = apt['background']\n        else:\n            kids_p_tune = power_loading_model.get_P(\n                det_array_name=det_array_name,\n                det_az=Angle(np.full(\n                    len(det_array_name), det_az_tune.degree) << u.deg),\n                det_alt=Angle(np.full(\n                    len(det_array_name), det_alt_tune.degree) << u.deg)\n                )\n        for array_name in self.array_names:\n            m = (det_array_name == array_name)\n            p = kids_p_tune[m].mean()\n            self.logger.debug(f\"set tune of {array_name} at P={p}\")\n\n        # TODO allow adjust kids_fp\n        probing_evaluator, probing_eval_ctx = self.probing_evaluator(\n            kids_p_tune=kids_p_tune,\n            kids_fp=None,\n            f_smp=obs_params.f_smp_probing,\n            power_loading_model=power_loading_model,\n            )\n        # compute the detector flxscale for sb = 1MJy\n        kids_probe_p = probing_eval_ctx['kids_probe_p']\n        sky_sb_to_pwr = probing_eval_ctx['sky_sb_to_pwr']\n        p_sb_unity = np.squeeze(sky_sb_to_pwr(\n            det_s=np.ones((len(det_array_name), 1)) << u.MJy / u.sr\n            ))\n        p_norm = p_sb_unity + kids_p_tune\n        _, x_norm, _ = kids_probe_p(p_norm[:,
np.newaxis])\n x_norm = np.squeeze(x_norm)\n flxscale = 1. / x_norm\n # update the apt\n apt['background'] = kids_p_tune\n apt['flxscale'] = flxscale\n\n for array_name in self.array_names:\n m = (det_array_name == array_name)\n _kids_p_tune = kids_p_tune[m].mean()\n _p_sb_unity = p_sb_unity[m].mean()\n _p_norm = p_norm[m].mean()\n _x_norm = x_norm[m].mean()\n _flxscale = flxscale[m].mean()\n self.logger.debug(\n f\"summary of probing setup for {array_name}:\\n\"\n f\" kids_p_tune={_kids_p_tune}\\n\"\n f\" p_sb_unity={_p_sb_unity}\\n\"\n f\" p_norm={_p_norm}\\n\"\n f\" x_norm={_x_norm}\\n\"\n f\" flxscale={_flxscale}\\n\"\n )\n\n # import matplotlib.pyplot as plt\n # import matplotlib.patches as patches\n # fig, ax = plt.subplots(1, 1)\n # ax.plot(\n # det_sky_traj['az'].degree, det_sky_traj['alt'].degree,\n # linestyle='none', marker='o')\n # p = patches.Rectangle(\n # (\n # det_sky_bbox_altaz.w.to_value(u.deg),\n # det_sky_bbox_altaz.s.to_value(u.deg),\n # ),\n # det_sky_bbox_altaz.width.to_value(u.deg),\n # det_sky_bbox_altaz.height.to_value(u.deg),\n # linewidth=1, edgecolor='r', facecolor='none')\n # ax.add_patch(p)\n # det_az_tune = det_sky_traj['az'][:, i_closest]\n # c = kids_p_tune.to_value(u.pW)[m]\n # ax.scatter(\n # det_az_tune[m], det_alt_tune[m],\n # c=c, s=100, vmin=c.min(), vmax=c.max())\n # plt.show()\n\n # now we are ready to return the iterative evaluator\n def evaluate(t):\n det_s, mapping_info = mapping_evaluator(t)\n det_sky_traj = mapping_info['det_sky_traj']\n det_pwr, probing_info = probing_evaluator(\n det_s=det_s, det_sky_traj=det_sky_traj)\n return locals()\n\n self._eval_context = locals()\n yield evaluate, t_chunks\n # release the contexts\n es.close()\n self._eval_context = None\n\n\nclass ToltecSimuOutputContext(ExitStack):\n \"\"\"A context class to manage TolTEC simulator output files.\n\n \"\"\"\n\n logger = get_logger()\n _lockfile = 'simu.lock'\n _statefile = 'simustate.yaml'\n\n def __init__(self, simulator, rootpath):\n super().__init__()\n self._simulator = simulator\n self._rootpath = rootpath\n self._state = None\n self._nms = dict()\n\n @property\n def simulator(self):\n return self._simulator\n\n @property\n def rootpath(self):\n return self._rootpath\n\n @property\n def state(self):\n return self._state\n\n @property\n def nms(self):\n \"\"\"The dict of nc file mappers.\"\"\"\n return self._nms\n\n def _create_nm(self, interface, suffix):\n if interface in self.nms:\n raise ValueError(f\"NcNodeMapper already exists for {interface}\")\n output_filepath = self.make_output_filename(interface, suffix)\n nm = self.nms[interface] = NcNodeMapper(\n source=output_filepath, mode='w')\n return nm\n\n def _get_tel_interface(self):\n return 'tel'\n\n def write_mapping_meta(self, mapping, simu_config):\n \"\"\"Save the mapping model to tel.nc.\"\"\"\n # create tel.nc\n nm_tel = self._create_nm(self._get_tel_interface(), '.nc')\n self.logger.debug(\n f\"save mapping model {mapping} to {nm_tel}\")\n nc_tel = nm_tel.nc_node\n # populate the headers\n # add some common settings to the nc tel header\n rootpath = simu_config.runtime_info.config_info.runtime_context_dir\n nm_tel.setstr(\n 'Header.File.Name',\n nm_tel.file_loc.path.relative_to(rootpath).as_posix())\n nm_tel.setstr(\n 'Header.Source.SourceName',\n simu_config.jobkey)\n if isinstance(mapping, (LmtTcsTrajMappingModel, )):\n self.logger.debug(\n f\"mapping model meta:\\n{pformat_yaml(mapping.meta)}\")\n nm_tel.setstr(\n 'Header.Dcs.ObsPgm',\n mapping.meta['mapping_type'])\n elif mapping.pattern_kind & 
PatternKind.lissajous:\n # TODO handle lissajous\n nm_tel.setstr(\n 'Header.Dcs.ObsPgm',\n 'Lissajous')\n elif mapping.pattern_kind & PatternKind.raster_like:\n nm_tel.setstr(\n 'Header.Dcs.ObsPgm',\n 'Map')\n else:\n raise NotImplementedError\n # the len=2 is for mean and ref coordinates.\n d_coord = 'Header.Source.Ra_xlen'\n nc_tel.createDimension(d_coord, 2)\n v_source_ra = nc_tel.createVariable(\n 'Header.Source.Ra', 'f8', (d_coord, ))\n v_source_ra.unit = 'rad'\n v_source_dec = nc_tel.createVariable(\n 'Header.Source.Dec', 'f8', (d_coord, ))\n v_source_dec.unit = 'rad'\n\n ref_coord = mapping.target.transform_to('icrs')\n v_source_ra[:] = ref_coord.ra.radian\n v_source_dec[:] = ref_coord.dec.radian\n\n # setup data variables for write_data\n d_time = 'time'\n nc_tel.createDimension(d_time, None)\n m = dict() # this get added to the node mapper\n m['time'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.TelTime', 'f8', (d_time, ))\n v_ra = m['ra'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.TelRaAct', 'f8', (d_time, ))\n v_ra.unit = 'rad'\n v_dec = m['dec'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.TelDecAct', 'f8', (d_time, ))\n v_dec.unit = 'rad'\n v_alt = m['alt'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.TelElAct', 'f8', (d_time, ))\n v_alt.unit = 'rad'\n v_az = m['az'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.TelAzAct', 'f8', (d_time, ))\n v_az.unit = 'rad'\n v_pa = m['pa'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.ActParAng', 'f8', (d_time, ))\n v_pa.unit = 'rad'\n # the _sky az alt and pa are the corrected positions of telescope\n # they are the same as the above when no pointing correction\n # is applied.\n v_alt_sky = m['alt_sky'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.TelElSky', 'f8', (d_time, ))\n v_alt_sky.unit = 'rad'\n v_az_sky = m['az_sky'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.TelAzSky', 'f8', (d_time, ))\n v_az_sky.unit = 'rad'\n v_pa_sky = m['pa_sky'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.ParAng', 'f8', (d_time, ))\n v_pa_sky.unit = 'rad'\n m['hold'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.Hold', 'f8', (d_time, )\n )\n v_source_alt = m['source_alt'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.SourceEl', 'f8', (d_time, ))\n v_source_alt.unit = 'rad'\n v_source_az = m['source_az'] = nc_tel.createVariable(\n 'Data.TelescopeBackend.SourceAz', 'f8', (d_time, ))\n v_source_az.unit = 'rad'\n nm_tel.update(m)\n return nm_tel\n\n def _get_kidsdata_interface(self, nw):\n return f'toltec{nw}'\n\n def _make_kidsdata_nc(self, apt, nw, simu_config):\n state = self.state\n mapt = apt[apt['nw'] == nw]\n interface = self._get_kidsdata_interface(nw)\n nm_toltec = self._create_nm(interface, '_timestream.nc')\n nc_toltec = nm_toltec.nc_node\n # add meta data\n rootpath = simu_config.runtime_info.config_info.runtime_context_dir\n obs_params = simu_config.obs_params\n nm_toltec.setstr(\n 'Header.Toltec.Filename',\n nm_toltec.file_loc.path.relative_to(rootpath).as_posix())\n nm_toltec.setscalar(\n 'Header.Toltec.ObsType', 1, dtype='i4') # Timestream\n nm_toltec.setscalar(\n 'Header.Toltec.Master', 0, dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.RepeatLevel', 0, dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.RoachIndex', nw, dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.ObsNum', state['obsnum'], dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.SubObsNum',\n state['subobsnum'], dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.ScanNum',\n state['scannum'], dtype='i4')\n 
nm_toltec.setscalar(\n 'Header.Toltec.TargSweepObsNum',\n state['cal_obsnum'], dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.TargSweepSubObsNum',\n state['cal_subobsnum'], dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.TargSweepScanNum',\n state['cal_scannum'], dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.SampleFreq',\n obs_params.f_smp_probing.to_value(u.Hz))\n nm_toltec.setscalar(\n 'Header.Toltec.LoCenterFreq', 0.)\n nm_toltec.setscalar(\n 'Header.Toltec.DriveAtten', 0.)\n nm_toltec.setscalar(\n 'Header.Toltec.SenseAtten', 0.)\n nm_toltec.setscalar(\n 'Header.Toltec.AccumLen', 524288, dtype='i4')\n nm_toltec.setscalar(\n 'Header.Toltec.MaxNumTones', 1000, dtype='i4')\n\n nc_toltec.createDimension('numSweeps', 1)\n nc_toltec.createDimension('toneFreqLen', len(mapt))\n v_tones = nc_toltec.createVariable(\n 'Header.Toltec.ToneFreq',\n 'f8', ('numSweeps', 'toneFreqLen')\n )\n v_tones[0, :] = mapt['fp']\n nc_toltec.createDimension('modelParamsNum', 15)\n nc_toltec.createDimension('modelParamsHeaderItemSize', 32)\n v_mph = nc_toltec.createVariable(\n 'Header.Toltec.ModelParamsHeader',\n '|S1', ('modelParamsNum', 'modelParamsHeaderItemSize')\n )\n v_mp = nc_toltec.createVariable(\n 'Header.Toltec.ModelParams',\n 'f8', ('numSweeps', 'modelParamsNum', 'toneFreqLen')\n )\n mp_map = {\n 'f_centered': ('fp', u.Hz),\n 'f_out': ('fr', u.Hz),\n 'f_in': ('fp', u.Hz),\n 'flag': None,\n 'fp': ('fp', u.Hz),\n 'Qr': 'Qr',\n 'Qc': 1.,\n 'fr': ('fr', u.Hz),\n 'A': 0.,\n 'normI': 'g0',\n 'normQ': 'g1',\n 'slopeI': ('k0', u.s),\n 'slopeQ': ('k1', u.s),\n 'interceptI': 'm0',\n 'interceptQ': 'm1',\n }\n for i, (k, v) in enumerate(mp_map.items()):\n v_mph[i, :] = netCDF4.stringtochar(np.array(k, '|S32'))\n if v is not None:\n if isinstance(v, str):\n v = mapt[v]\n elif isinstance(v, tuple):\n v = mapt[v[0]].to_value(v[1])\n v_mp[0, i, :] = v\n\n # data variables\n m = dict()\n nc_toltec.createDimension('loclen', len(mapt))\n nc_toltec.createDimension('iqlen', len(mapt))\n nc_toltec.createDimension('tlen', 6)\n nc_toltec.createDimension('time', None)\n m['flo'] = nc_toltec.createVariable(\n 'Data.Toltec.LoFreq', 'i4', ('time', ))\n m['time'] = nc_toltec.createVariable(\n 'Data.Toltec.Ts', 'i4', ('time', 'tlen'))\n m['I'] = nc_toltec.createVariable(\n 'Data.Toltec.Is', 'i4', ('time', 'iqlen'))\n m['Q'] = nc_toltec.createVariable(\n 'Data.Toltec.Qs', 'i4', ('time', 'iqlen'))\n nm_toltec.update(m)\n return nm_toltec\n\n def _get_hwp_interface(self):\n return 'hwp'\n\n def _make_hwp_nc(self, simu_config):\n sim = self._simulator\n hwp_cfg = sim.hwp_config\n # if not hwp_cfg.rotator_enabled:\n # return None\n state = self.state\n interface = self._get_hwp_interface()\n nm_hwp = self._create_nm(interface, '.nc')\n nc_hwp = nm_hwp.nc_node\n # add meta data\n rootpath = simu_config.runtime_info.config_info.runtime_context_dir\n nm_hwp.setstr(\n 'Header.Toltec.Filename',\n nm_hwp.file_loc.path.relative_to(rootpath).as_posix())\n nm_hwp.setscalar(\n 'Header.Toltec.ObsType', 1, dtype='i4') # Timestream\n nm_hwp.setscalar(\n 'Header.Toltec.Master', 0, dtype='i4')\n nm_hwp.setscalar(\n 'Header.Toltec.RepeatLevel', 0, dtype='i4')\n nm_hwp.setscalar(\n 'Header.Toltec.ObsNum', state['obsnum'], dtype='i4')\n nm_hwp.setscalar(\n 'Header.Toltec.SubObsNum',\n state['subobsnum'], dtype='i4')\n nm_hwp.setscalar(\n 'Header.Toltec.ScanNum',\n state['scannum'], dtype='i4')\n nm_hwp.setscalar(\n 'Header.Toltec.TargSweepObsNum',\n state['cal_obsnum'], dtype='i4')\n nm_hwp.setscalar(\n 'Header.Toltec.TargSweepSubObsNum',\n 
state['cal_subobsnum'], dtype='i4')\n nm_hwp.setscalar(\n 'Header.Toltec.TargSweepScanNum',\n state['cal_scannum'], dtype='i4')\n nm_hwp.setscalar(\n 'Header.Hwp.SampleFreq',\n hwp_cfg.f_smp.to_value(u.Hz))\n nm_hwp.setscalar(\n 'Header.Hwp.RotatorEnabled',\n hwp_cfg.rotator_enabled, dtype='i4')\n # data variables\n m = dict()\n nc_hwp.createDimension('tlen', 6)\n nc_hwp.createDimension('time', None)\n m['pa'] = nc_hwp.createVariable(\n 'Data.Hwp.', 'f8', ('time', ))\n m['time'] = nc_hwp.createVariable(\n 'Data.Toltec.Ts', 'i4', ('time', 'tlen'))\n nm_hwp.update(m)\n return nm_hwp\n\n def _ensure_sim_eval_context(self):\n if getattr(self._simulator, '_eval_context', None) is None:\n raise ValueError(\n \"Cannot write sim meta without eval context open\")\n return self._simulator._eval_context\n\n def write_sim_meta(self, simu_config):\n eval_ctx = self._ensure_sim_eval_context()\n # apt.ecsv\n apt = eval_ctx['apt']\n # write the apt\n apt.write(self.make_output_filename('apt', '.ecsv'),\n format='ascii.ecsv', overwrite=True)\n # toltec*.nc\n for nw in np.unique(apt['nw']):\n self._make_kidsdata_nc(apt, nw, simu_config)\n # hwp.nc\n self._make_hwp_nc(simu_config)\n\n def write_sim_data(self, data):\n eval_ctx = self._ensure_sim_eval_context()\n # apt.ecsv\n apt = eval_ctx['apt']\n\n nm_tel = self.nms[self._get_tel_interface()]\n nc_tel = nm_tel.nc_node\n d_time = 'time'\n\n bs_coords_icrs = data['mapping_info']['bs_coords_icrs']\n bs_coords_altaz = data['mapping_info']['bs_coords_altaz']\n bs_parallactic_angle = data['mapping_info']['bs_parallactic_angle']\n target_altaz = data['mapping_info']['target_altaz']\n hwp_pa_altaz = data['mapping_info']['hwp_pa_altaz']\n time_obs = data['mapping_info']['time_obs']\n holdflag = data['mapping_info']['holdflag']\n t_grid = data['t']\n iqs = data['probing_info']['iqs']\n\n idx = nc_tel.dimensions[d_time].size\n nm_tel.getvar('time')[idx:] = time_obs.unix\n nm_tel.getvar('ra')[idx:] = bs_coords_icrs.ra.radian\n nm_tel.getvar('dec')[idx:] = bs_coords_icrs.dec.radian\n nm_tel.getvar('az')[idx:] = bs_coords_altaz.az.radian\n nm_tel.getvar('alt')[idx:] = bs_coords_altaz.alt.radian\n nm_tel.getvar('pa')[idx:] = bs_parallactic_angle.radian\n # no pointing model\n nm_tel.getvar('az_sky')[idx:] = bs_coords_altaz.az.radian\n nm_tel.getvar('alt_sky')[idx:] = bs_coords_altaz.alt.radian\n nm_tel.getvar('pa_sky')[idx:] = bs_parallactic_angle.radian\n\n nm_tel.getvar('source_az')[idx:] = target_altaz.az.radian\n nm_tel.getvar('source_alt')[idx:] = target_altaz.alt.radian\n nm_tel.getvar('hold')[idx:] = holdflag\n self.logger.info(\n f'write [{idx}:{idx + len(time_obs)}] to'\n f' {nc_tel.filepath()}')\n for nw in np.unique(apt['nw']):\n nm_toltec = self.nms[self._get_kidsdata_interface(nw)]\n nc_toltec = nm_toltec.nc_node\n idx = nc_toltec.dimensions[d_time].size\n self.logger.info(\n f'write [{nc_toltec.dimensions[\"iqlen\"].size}]'\n f'[{idx}:{idx + len(time_obs)}] to'\n f' {nc_toltec.filepath()}')\n m = (apt['nw'] == nw)\n nm_toltec.getvar('flo')[idx:] = 0\n nm_toltec.getvar('time')[idx:, 0] = t_grid\n nm_toltec.getvar('I')[idx:, :] = iqs.real[m, :].T\n nm_toltec.getvar('Q')[idx:, :] = iqs.imag[m, :].T\n\n # hwp\n nm_hwp = self.nms[self._get_hwp_interface()]\n nc_hwp = nm_hwp.nc_node\n idx = nc_hwp.dimensions[d_time].size\n self.logger.info(\n f'write '\n f'[{idx}:{idx + len(time_obs)}] to'\n f' {nc_hwp.filepath()}')\n nm_hwp.getvar('time')[idx:, 0] = time_obs.unix\n nm_hwp.getvar('pa')[idx:] = hwp_pa_altaz.radian\n\n def open(self, overwrite=False):\n \"\"\"Open 
files to save data.\n\n This increments the obsnum state and makes available a set of opened\n file handlers in :attr:`nms`.\n\n Parameters\n ----------\n overwrite : bool, optional\n If True, the obsnum is not incremented.\n \"\"\"\n state = self._state = self.enter_context(self.writelock())\n # check if the previous data is valid\n valid = state.get('valid', False)\n if not valid:\n self.logger.warning(f\"overwrite invalid state entry:\\n{state}\")\n overwrite = True\n if not overwrite:\n state['obsnum'] += 1\n state['cal_obsnum'] += 1\n state['ut'] = datetime.utcnow()\n state['valid'] = False\n state.sync()\n self.logger.info(f\"simulator output state:\\n{state}\")\n return self\n\n def __exit__(self, *args):\n self.state['valid'] = True\n self.state.sync()\n super().__exit__(*args)\n # make a copy of the state file for this obsnum\n shutil.copy(self.state.filepath, self.make_output_filename(\n 'simustate', '.yaml'))\n # clean up the contexts\n self._state = None\n # close all files and clear the mapper\n for nm in self.nms.values():\n nm.close()\n self.nms.clear()\n\n @contextmanager\n def writelock(self):\n outdir = self.rootpath\n lockfile = outdir.joinpath(self._lockfile)\n if lockfile.exists():\n raise RuntimeError(f\"cannot acquire write lock for {outdir}\")\n state = PersistentState(\n outdir.joinpath(self._statefile),\n init={\n 'obsnum': 1,\n 'subobsnum': 0,\n 'scannum': 0,\n 'cal_obsnum': 1,\n 'cal_subobsnum': 0,\n 'cal_scannum': 0,\n })\n try:\n with open(lockfile, 'w'):\n self.logger.debug(f'create lock file: {lockfile}')\n pass\n yield state.sync()\n finally:\n try:\n lockfile.unlink()\n self.logger.debug(f'unlink lock file: {lockfile}')\n except Exception:\n self.logger.debug(\"failed release write lock\", exc_info=True)\n\n def make_output_filename(self, interface, suffix):\n state = self.state\n filename = (\n f'{interface}_{state[\"obsnum\"]:06d}_'\n f'{state[\"subobsnum\"]:03d}_'\n f'{state[\"scannum\"]:04d}_'\n f'{state[\"ut\"].strftime(\"%Y_%m_%d_%H_%M_%S\")}'\n f'{suffix}'\n )\n return self.rootpath.joinpath(filename)\n"
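`_get_detector_sky_traj` above avoids evaluating the expensive sky projection at every sample by computing it on a coarse time grid and linearly interpolating back to the full grid. A toy sketch of the same subsample-then-interp pattern, with a hypothetical stand-in in place of the projection model:

```python
# Sketch of the coarse-evaluate-then-interpolate pattern used by
# _get_detector_sky_traj; expensive_model is a made-up stand-in.
import numpy as np
from scipy.interpolate import interp1d

def expensive_model(ts):
    # stand-in for the per-sample sky projection evaluation
    return np.sin(ts / 60.)

t = np.linspace(0., 600., 6001)  # full sample grid, seconds
interp_len = 10.                 # evaluate the model only every ~10 s

# collect the subsample index, mirroring the loop in the simulator
s = [0]
for i, ti in enumerate(t):
    if ti - t[s[-1]] < interp_len:
        continue
    s.append(i)
if s[-1] != len(t) - 1:
    s.append(len(t) - 1)         # ensure the last point is included

coarse = expensive_model(t[s])
fine = interp1d(t[s], coarse, kind='linear')(t)  # back to the full grid
```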
] | [
[
"numpy.sqrt",
"numpy.squeeze",
"numpy.vstack",
"numpy.mean",
"numpy.exp",
"numpy.random.default_rng",
"numpy.sin",
"numpy.std",
"numpy.nansum",
"numpy.diff",
"numpy.ravel",
"numpy.zeros",
"numpy.median",
"numpy.tan",
"numpy.array",
"numpy.meshgrid",
"numpy.sum",
"numpy.cos",
"numpy.expm1",
"numpy.ones",
"numpy.random.normal",
"numpy.shape",
"numpy.isscalar",
"numpy.empty"
],
[
"numpy.arange",
"numpy.argmin",
"numpy.linspace"
],
[
"numpy.arctan2",
"numpy.array",
"numpy.sqrt"
],
[
"numpy.unique",
"numpy.arange",
"numpy.squeeze",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.isscalar",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
kagemusha/streamingbandit | [
"f5228611a0ac9432e958761dd6d68d972d7d163b"
] | [
"app/libs/thompson_bayesian_linear.py"
] | [
"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom libs.base import *\nimport ast\n\nglobal numpy\n\nclass ThompsonBayesianLinear():\n \"\"\" Class for Thompson sampling for Bayesian Linear Regression\n \n :var dict default: The value of the model, consisting of a 1*p \\\n list of J, p*p list of P and an error rate.\n \"\"\"\n def __init__(self, default):\n if default == {}:\n self.value = {'J' : [0, 0], 'P' : [[1, 0],[0, 1]], 'err' : 1}\n else:\n self.value = default.copy()\n if isinstance(self.value['J'], str) == True:\n self.value['J'] = ast.literal_eval(self.value['J'])\n if isinstance(self.value['P'], str) == True:\n self.value['P'] = ast.literal_eval(self.value['P'])\n if isinstance(self.value['err'], str) == True:\n self.value['err'] = ast.literal_eval(self.value['err'])\n self.value['J'] = np.matrix(self.value['J'])\n self.value['P'] = np.matrix(self.value['P'])\n\n def get_dict(self):\n \"\"\" Return all the variables that are needed to do an online estimation \\\n in a dictionary. Or to save the parameters in a database.\n \"\"\"\n to_dict = self.value.copy()\n to_dict['J'] = to_dict['J'].tolist()\n to_dict['P'] = to_dict['P'].tolist()\n return to_dict\n\n def update(self, y, x, discount = 1):\n \"\"\" Update the Bayesian linear model.\n \n :param int y: The observation value.\n :param list x: A list of ints of the regressors.\n :param int discount: A discount. Default is 1, which means no discount is used.\n \"\"\"\n y = y\n x = np.matrix(x)\n self.value['J'] = (discount*(x*y)/self.value['err']) + self.value['J']\n self.value['P'] = (discount*(x.T*x)/self.value['err']) + self.value['P']\n \n def sample(self):\n \"\"\" Return a sample of coefficients Betas using Thompson sampling.\n\n \"\"\"\n # Transform J = Sigma^-1 * mu to mu\n # Transform P = Sigma^-1 to Sigma\n sigma = np.linalg.inv(self.value['P'])\n mu = sigma * self.value['J'].T\n mu = np.squeeze(np.asarray(mu))\n # Random draw from np.random.multivariate_normal\n betas = np.random.multivariate_normal(mu,sigma)\n # Prediction is y_t ~ N(betas.T * x, sigma^2)\n #y = np.random.normal(np.dot(betas.T, x), err)\n return betas\n"
] | [
[
"numpy.matrix",
"numpy.random.multivariate_normal",
"numpy.linalg.inv",
"numpy.asarray"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sdss/chernosim | [
"734f349c82d85dd6a1fe00013d21025ef0e9b845"
] | [
"chernosim/acquisition/acquisition.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# @Author: José Sánchez-Gallego ([email protected])\n# @Date: 2020-09-13\n# @Filename: acquisition.py\n# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)\n\nimport collections\nimport functools\nimport multiprocessing\nimport pathlib\nimport shutil\nimport warnings\n\nimport astropy.table\nimport astropy.wcs\nimport matplotlib.patches\nimport matplotlib.pyplot\nimport numpy\nimport pandas\nimport tqdm\nimport yaml\n\nfrom cherno.astrometry import AstrometryNet\nfrom sdsstools import read_yaml_file\n\nfrom .. import config\nfrom .utils import (create_gfa_wcs, get_gfa_centre, get_uniform_ra_dec,\n get_wcs_rotation, query_field, sky_separation)\n\n\ndef select_stars(data, boresight, observatory='apo',\n r1=None, r2=None, phi=None, gfa_rot=None):\n \"\"\"Selects stars for the simulation.\n\n Given a dataframe with a list of stars, returns a subset of the dataframe\n with stars that fall within the footprint of the GFA chips.\n\n The GFAs are defined as the areas that subtend an angle ``phi`` with\n respect to the boresight in an annulus of radii ``r1`` and ``r2``. The\n rotation angle of each camera is one of the ``gfa_rot`` values, with zero\n degrees corresponding to the direction of the celestial North. This is an\n approximation of the real footprint of the GFA, which are rectangular and\n not an annulus sector, but the areas are comparable and this provides a\n simple way to select the stars.\n\n Parameters\n ----------\n data : pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``ra`` and ``dec``, in degrees.\n boresight : tuple\n A tuple with the right ascension and declination of the boresight,\n in degrees.\n observatory : str\n The observatory, used to load the default configuration for the GFAs.\n r1,r2 : float\n The internal and external radii along which the GFAs are located, in\n degrees.\n phi : float\n The angle subtended by each GFA, in degrees.\n gfa_rot : list\n A list with the rotation of each GFA, with respect to the boresight,\n in degrees.\n\n Returns\n -------\n `~pandas.DataFrame`\n The input dataframe restricted to the stars that fall within the\n footprint of each GFA. 
A new column ``gfa`` is added with the index\n of the GFA, which correspond to the ``gfa_rot`` rotation.\n\n \"\"\"\n\n # Get data from configuration file if not provided.\n obs_data = config[observatory]\n r1 = r1 or obs_data['r1']\n r2 = r2 or obs_data['r2']\n phi = phi or obs_data['phi']\n gfa_rot = gfa_rot or obs_data['gfa_rot']\n\n b_ra = boresight[0]\n b_dec = boresight[1]\n\n ra_rad = numpy.radians(data.ra)\n dec_rad = numpy.radians(data.dec)\n delta_ra_rad = ra_rad - numpy.radians(b_ra)\n\n # Calculate the separation between each star and the boresight.\n sep = numpy.degrees(\n numpy.arccos(\n numpy.sin(dec_rad) * numpy.sin(numpy.radians(b_dec)) +\n numpy.cos(dec_rad) * numpy.cos(numpy.radians(b_dec)) *\n numpy.cos(delta_ra_rad)\n )\n )\n\n # Remove stars that ar not in the GFA annulus\n data = data.loc[(sep > r1) & (sep < r2)]\n\n if len(data) == 0:\n data.loc[:, ['gfa', 'theta']] = numpy.nan\n return data\n\n sep = sep[(sep > r1) & (sep < r2)]\n sep_rad = numpy.radians(sep)\n\n ra_rad = numpy.radians(data.ra)\n dec_rad = numpy.radians(data.dec)\n delta_ra_rad = ra_rad - numpy.radians(b_ra)\n\n # Calculate the angle, theta, between boresight, North, and the star.\n # We define a spherical triangle with vertices in North, boresight, and\n # each star and use the sine law.\n sin_theta = numpy.sin(delta_ra_rad) * numpy.cos(dec_rad) / numpy.sin(sep_rad)\n theta = numpy.degrees(numpy.arcsin(sin_theta))\n\n # Solve for degeneracy in arcsin.\n theta.loc[data.dec < b_dec] = 180 - theta[data.dec < b_dec]\n theta.loc[theta < 0] += 360\n\n data['theta'] = theta\n\n # Determine the GFA on which footprint each star falls, if any.\n data['gfa'] = -1\n\n for gfa_id in range(len(gfa_rot)):\n rot = gfa_rot[gfa_id]\n rot_min = (rot - phi / 2.) % 360.\n rot_max = (rot + phi / 2.) % 360.\n data.loc[(theta - rot_min) % 360. <=\n (rot_max - rot_min) % 360., 'gfa'] = gfa_id\n\n data = data.loc[data.gfa >= 0]\n\n return data\n\n\ndef radec_to_xy(data, wcs=None, **kwargs):\n \"\"\"Converts ``(RA, Dec)`` to ``(x, y)`` for a given GFA.\n\n Creates a mock WCS transformation for a given GFA and converts star RA, Dec\n to x, y on what would be a GFA image. This conversion is not carefully\n done and is not a proper transformation between on-sky coordinates and\n focal coordinates, but should be sufficient for the purposes of the\n simulation.\n\n Parameters\n ----------\n data : pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``ra`` and ``dec``, in degrees.\n wcs : ~astropy.wcs.WCS\n The WCS object to use. 
If `None`, it calls `.create_gfa_wcs`.\n kwargs : dict\n Arguments to pass to `.create_gfa_wcs`.\n\n Returns\n -------\n `~pandas.DataFrame`, `~astropy.wcs.WCS`\n The input dataframe with two columns, ``x`` and ``y`` indicating the\n position of the star on the GFA chip, and the `~astropy.wcs.WCS`\n object.\n\n \"\"\"\n\n if len(data) == 0:\n data['x'] = numpy.nan\n data['y'] = numpy.nan\n return data\n\n if not wcs:\n wcs = create_gfa_wcs(**kwargs)\n\n # Convert coordinates to x, y\n coords = data[['ra', 'dec']].to_numpy()\n\n x, y = wcs.wcs_world2pix(coords, 0).T\n data['x'] = x\n data['y'] = y\n\n return data, wcs\n\n\ndef prepare_data(boresight, data=None, observatory='apo', r1=None, r2=None,\n mag_range=None, mag_column=None, phi=None, gfa_rot=None,\n shape=None, pixel_size=None, plate_scale=None, plot=False,\n apply_proper_motion=False, ref_epoch=2015.5, epoch=False,\n database_params=None):\n \"\"\"Prepares data to be matched by astrometry.net.\n\n Performs the following steps:\n\n - Queries the database to receive the list of observed stars.\n\n - Applies proper motions.\n\n - Select stars that fall within the footprint of the GFAs, for a\n given footprint.\n\n - Calculates the WCS of each GFA and convert the input coordinates to\n pixel coordinates on the GFA image.\n\n - Creates a global WCS for the full FOV of the telescope, with zero on\n the boresight, and converts the input coordinates to pseudo-pixels in\n that frame.\n\n Parameters\n ----------\n boresight : tuple\n A tuple with the right ascension and declination of the boresight,\n in degrees.\n data : pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``ra`` and ``dec``, in degrees. If `None`, calls `.query_field` to\n retrieve a list of stars from the database.\n observatory : str\n The observatory, used to load the default configuration for the GFAs.\n r1,r2 : float\n The internal and external radii along which the GFAs are located, in\n degrees.\n mag_range : tuple\n The range of magnitudes used to select stars.\n mag_column : str\n The name of the magnitude column to query.\n phi : float\n The angle subtended by each GFA, in degrees.\n gfa_rot : list\n A list with the rotation of each GFA, with respect to the boresight,\n in degrees.\n shape : tuple\n Number of pixels, in the x and y direction of the GFA chip.\n pixel_size : float\n The pixel size, in microns.\n plate_scale : float\n The plate scale, in mm/deg.\n plot : bool or str\n Whether to produce a plot with the input stars, GFA centres, and\n footprints. If a string, the path where to save the plot.\n apply_proper_motion : bool\n Whether to propagate the position to a given ``epoch``. 
Assumes the\n data returned by `.query_field` has columns ``pmra`` and ``pmdec`` in\n mas and that ``pmra`` contains a factor with the cosine of declination.\n ref_epoch : float\n The epoch of the catalogue, as a Julian year.\n epoch : float\n The epoch of the observation, as a Julian year.\n database_params : dict\n A dictionary of database parameters to be passed to `.query_field`.\n\n Returns\n -------\n `~pandas.DataFrame`\n The input dataframe restricted to the stars that fall within the\n footprint of each GFA and with additional column indicating the GFA\n chip and x and y positions on that chip, and the global x and y\n pixel coordinates on the pseudo-frame of the FOV.\n\n \"\"\"\n\n b_ra, b_dec = boresight\n\n if data is None:\n data = query_field(boresight, r1=r1, r2=r2, observatory=observatory,\n mag_range=mag_range, mag_column=mag_column,\n database_params=database_params)\n\n data = select_stars(data, boresight, observatory=observatory,\n r1=r1, r2=r2, phi=phi, gfa_rot=gfa_rot)\n\n if apply_proper_motion:\n assert epoch is not None, 'epoch is needed to apply proper motions.'\n data['ra_orig'] = data.ra\n data['dec_orig'] = data.dec\n pmra = data.pmra / 1000 / 3600. / numpy.cos(numpy.radians(data.dec))\n pmdec = data.pmdec / 1000 / 3600.\n data.ra += pmra * (epoch - ref_epoch)\n data.dec += pmdec * (epoch - ref_epoch)\n # Deal with NaN in pmra/pmdec\n data.ra = data.ra.fillna(data.ra_orig)\n data.dec = data.dec.fillna(data.dec_orig)\n\n obs_data = config[observatory]\n gfa_rot = gfa_rot or obs_data['gfa_rot']\n plate_scale = plate_scale or obs_data['plate_scale']\n pixel_size = pixel_size or config['gfa']['pixel_size']\n\n wcs = [create_gfa_wcs(rot,\n boresight,\n observatory='apo',\n r1=r1, r2=r2,\n shape=shape,\n pixel_size=pixel_size,\n plate_scale=plate_scale)\n for rot in gfa_rot]\n\n data = data.groupby('gfa').apply(\n lambda data_gfa: radec_to_xy(data_gfa,\n wcs=wcs[data_gfa.gfa.iloc[0]])[0])\n\n if plot is not False and plot is not None:\n\n fig, ax = matplotlib.pyplot.subplots()\n\n centres = numpy.array([get_gfa_centre(rot,\n boresight,\n observatory=observatory,\n r1=r1, r2=r2)\n for rot in gfa_rot])\n\n ax.scatter(data.ra, data.dec, s=1.0, marker='.', color='b')\n ax.scatter(centres[:, 0], centres[:, 1], s=5.0, marker='x', color='r')\n\n obs_data = config[observatory]\n shape = shape or config['gfa']['shape']\n\n for ww in wcs:\n footprint = ww.calc_footprint(axes=shape)\n rect = matplotlib.patches.Polygon(footprint, facecolor='None',\n edgecolor='k', linewidth=1)\n ax.add_patch(rect)\n\n ax.set_xlim(b_ra + 1.6 / numpy.cos(numpy.radians(b_dec)),\n b_ra - 1.6 / numpy.cos(numpy.radians(b_dec)))\n ax.set_ylim(b_dec - 1.6, b_dec + 1.6)\n\n n_stars = data.groupby('gfa').size().tolist()\n ax.set_title(f'(alpha, delta)=({b_ra:.2f}, {b_dec:.2f})\\n '\n f'n_stars={sum(n_stars)} '\n f'({\", \".join(map(str, n_stars))})')\n\n ax.set_xlabel('Right Ascension [deg]')\n ax.set_ylabel('Declination [deg]')\n\n fig.savefig(plot or 'gfa.pdf')\n\n return data\n\n\ndef add_noise(data, fwhm, detection_rate=0.95, non_detection_factor=1,\n mag_thres=13, mag_column='phot_g_mean_mag'):\n r\"\"\"Adds centroiding noise to the catalogue data.\n\n Modifies the pixel coordinates in the ``data`` dataframe, adding Gaussian\n noise with :math:`\\sigma={\\rm FWHM}/2\\sqrt{2\\ln 2}` to simulate seeing.\n If ``detection_rate`` is less than 1, targets are marked as detected or\n non-detected based on the ``dection_rate`` logarithmically scaled with\n magnitude.\n\n Parameters\n ----------\n data : 
pandas.DataFrame\n A dataframe with the star data. Must contain at least two columns,\n ``x`` and ``y``, with the pixel coordinates.\n fwhm : float\n The FWHM of the Gaussian noise to add. It must be in pixel units.\n detection_rate : float\n The probability of a source to be detected and its centroid measured.\n non_detection_factor : float\n A proportional factor used to weight the detection rate so that\n :math:`d=d_0-a\\log(m-m_0)` where :math:`d` is the final detection rate\n that will be applied to a target, :math:`d_0` is initial\n ``detection_rate`, :math:`m` is the magnitude of the source,\n :math:`m_0` is ``mag_thres``, and :math:`a` is\n ``non_detection_factor``.\n mag_thres : float\n The magnitude above which the detection rate will be reduced\n logarithmically.\n mag_column : str\n The name of the magnitude column in the dataframe.\n\n Returns\n -------\n `~pandas.DataFrame`\n The input dataframe in which the pixel coordinates have been modified\n to add centroiding noise. An additional boolean column, ``detected``,\n is added to indicate whether the source has been detected following the\n logic described for ``detection_rate``.\n\n \"\"\"\n\n data['x_no_noise'] = data.loc[:, 'x']\n data['y_no_noise'] = data.loc[:, 'y']\n\n sigma = fwhm / (2.0 * numpy.sqrt(2 * numpy.log(2)))\n\n n = data.shape[0]\n data.x += numpy.random.normal(0, sigma, n)\n data.y += numpy.random.normal(0, sigma, n)\n\n data['detected'] = True\n\n if detection_rate >= 1.0 or not mag_column:\n return data\n\n if not mag_thres:\n mag_thres = data[mag_column].max()\n\n delta_mag = data[mag_column] - mag_thres\n delta_mag[delta_mag < 0] = 0.\n\n detection_rate = numpy.tile(detection_rate, len(data))\n detection_rate[delta_mag > 0] -= (numpy.log10(delta_mag[delta_mag > 0]) *\n non_detection_factor)\n\n non_detected = numpy.random.uniform(size=n) > detection_rate\n data.loc[:, 'detected'] = ~non_detected\n\n return data\n\n\ndef _do_one_field(fields, config_data, observatory, output_dir,\n n_attempts, field_id, data=None, overwrite=False):\n \"\"\"Simulates one field.\"\"\"\n\n boresight = fields[field_id]\n\n plate_scale = config_data[observatory]['plate_scale']\n pixel_size = config_data['gfa']['pixel_size']\n pixel_scale = pixel_size / 1000. / plate_scale * 3600. 
# In arcsec\n\n field_dir = (output_dir / f'{field_id:05d}').absolute()\n if field_dir.exists():\n if overwrite:\n shutil.rmtree(field_dir)\n else:\n raise RuntimeError(f'{field_dir!s} already exists.')\n field_dir.mkdir(parents=True, exist_ok=True)\n\n numpy.random.seed(config_data['seed'] + field_id)\n\n astrometry_cfg = config_data['astrometry.cfg']\n if not astrometry_cfg:\n astrometry_cfg = (pathlib.Path(__file__).parent.absolute() /\n 'etc/astrometry.cfg')\n\n if not data or field_id not in data:\n star_data = prepare_data(boresight,\n observatory=observatory,\n mag_range=config_data['mag_range'],\n mag_column=config_data['mag_column'],\n apply_proper_motion=True,\n epoch=config_data['epoch'],\n plot=False,\n shape=config_data['gfa']['shape'],\n pixel_size=config_data['gfa']['pixel_size'],\n database_params=config_data['database'],\n **config_data[observatory])\n else:\n star_data = pandas.read_hdf(data[field_id])\n\n mag_column = config_data['mag_column']\n star_data.sort_values(mag_column, inplace=True)\n\n star_data.to_hdf(field_dir / f'data_{field_id:05d}.h5', 'data')\n\n if 'limit_mag_range' in config_data and config_data['limit_mag_range']:\n limit_mag_range = config_data['limit_mag_range']\n star_data = star_data[(star_data[mag_column] >= limit_mag_range[0]) &\n (star_data[mag_column] <= limit_mag_range[1])]\n\n gfa_rot = config_data[observatory]['gfa_rot']\n gfa_centres = {gfa_id: get_gfa_centre(gfa_rot[gfa_id],\n boresight,\n observatory=observatory).tolist()\n for gfa_id in range(len(gfa_rot))}\n\n for nn in range(n_attempts):\n\n n_att = nn + 1\n prefix = f'_{field_id:05d}_{n_att:03d}'\n\n log_config = {}\n log_config['input'] = {}\n log_input = log_config['input']\n\n log_input['boresight'] = boresight\n log_input['observatory'] = observatory\n log_input['field_id'] = field_id\n log_input['attempt_id'] = n_att\n log_input['gfa_centres'] = gfa_centres\n\n att_dir = field_dir / f'{n_att:03d}'\n if att_dir.exists():\n shutil.rmtree(att_dir)\n att_dir.mkdir(parents=True, exist_ok=True)\n\n fwhm = numpy.random.uniform(*config_data['fwhm_range'])\n log_input['fwhm'] = fwhm\n\n att_data = star_data.copy()\n att_data = add_noise(\n att_data, fwhm / pixel_scale,\n detection_rate=config_data['detection_rate'],\n non_detection_factor=config_data['non_detection_factor'],\n mag_thres=config_data['mag_thres'],\n mag_column=config_data['mag_column'])\n\n log_input['n_stars'] = len(att_data)\n log_input['n_detected'] = len(att_data[att_data.detected])\n\n gfa_ids = range(config_data['gfa']['n_cameras'])\n log_input['n_stars_per_gfa'] = {i: 0 for i in gfa_ids}\n log_input['n_detected_per_gfa'] = {i: 0 for i in gfa_ids}\n\n att_data.to_hdf(att_dir / f'data{prefix}.in.h5', 'data')\n\n gfa_xyls = {}\n\n for gfa_id in att_data.gfa.unique():\n\n gfa_table = astropy.table.Table.from_pandas(\n att_data.loc[(att_data.gfa == gfa_id) & att_data.detected])\n\n n_stars_gfa = len(att_data.loc[(att_data.gfa == gfa_id)])\n n_detected = len(gfa_table)\n\n gfa_table.write(att_dir / f'gfa{gfa_id}{prefix}.xyls',\n format='fits', overwrite=True)\n gfa_xyls[gfa_id] = str(att_dir / f'gfa{gfa_id}{prefix}.xyls')\n\n gfa_id = int(gfa_id) # To avoid YAML serialising as numpy object\n log_input['n_stars_per_gfa'][gfa_id] = n_stars_gfa\n log_input['n_detected_per_gfa'][gfa_id] = n_detected\n\n shutil.copy(astrometry_cfg, att_dir)\n\n with open(att_dir / f'config{prefix}.yaml', 'w') as out:\n out.write(yaml.dump(log_config))\n\n log_config['output'] = {}\n log_output = log_config['output']\n\n astrometry_net = 
AstrometryNet()\n astrometry_net.configure(\n backend_config=att_dir / pathlib.Path(astrometry_cfg).name,\n width=config_data['gfa']['shape'][0],\n height=config_data['gfa']['shape'][1],\n no_plots=True,\n scale_low=pixel_scale * 0.9,\n scale_high=pixel_scale * 1.1,\n scale_units='arcsecperpix',\n radius=config_data['search_params']['radius'],\n dir=att_dir)\n\n if config_data['search_params']['centre_on_gfa'] is False:\n\n ra_error = 2 * (numpy.random.uniform() - 0.5)\n ra_error *= config_data['search_params']['ra_error']\n dec_error = 2 * (numpy.random.uniform() - 0.5)\n dec_error *= config_data['search_params']['dec_error']\n\n prc = astrometry_net.run(list(gfa_xyls.values()),\n stdout=att_dir / f'stdout{prefix}',\n stderr=att_dir / f'stderr{prefix}',\n ra=boresight[0] + ra_error,\n dec=boresight[1] + dec_error)\n\n log_output['solve_field_time'] = prc.time\n\n else:\n\n prc_time = 0.0\n\n for gfa_id in gfa_xyls:\n\n ra_error = 2 * (numpy.random.uniform() - 0.5)\n ra_error *= config_data['search_params']['ra_error']\n dec_error = 2 * (numpy.random.uniform() - 0.5)\n dec_error *= config_data['search_params']['dec_error']\n\n gfa_centre = gfa_centres[gfa_id]\n\n stdout = att_dir / f'stdout_gfa{gfa_id}{prefix}'\n stderr = att_dir / f'stderr_gfa{gfa_id}{prefix}'\n\n prc = astrometry_net.run([gfa_xyls[gfa_id]],\n stdout=stdout,\n stderr=stderr,\n ra=gfa_centre[0] + ra_error,\n dec=gfa_centre[1] + dec_error)\n\n prc_time += prc.time\n\n log_output['solve_field_time'] = prc_time\n\n log_output['solved'] = {i: False for i in gfa_ids}\n\n att_data['ra_solved'] = numpy.nan\n att_data['dec_solved'] = numpy.nan\n att_data['separation'] = numpy.nan\n\n for gfa_id in gfa_ids:\n\n if not (att_dir / f'gfa{gfa_id}{prefix}.solved').exists():\n continue\n\n log_output['solved'][gfa_id] = True\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n solved_wcs = astropy.wcs.WCS(str(att_dir /\n f'gfa{gfa_id}{prefix}.wcs'))\n\n gfa_idx = att_data.gfa == gfa_id\n\n radec_solved = solved_wcs.wcs_pix2world(\n att_data.loc[gfa_idx, ['x', 'y']].to_numpy(), 0)\n\n att_data.loc[gfa_idx, 'ra_solved'] = radec_solved[:, 0]\n att_data.loc[gfa_idx, 'dec_solved'] = radec_solved[:, 1]\n att_data.loc[gfa_idx, 'separation'] = sky_separation(\n att_data.loc[gfa_idx, 'ra'],\n att_data.loc[gfa_idx, 'dec'],\n att_data.loc[gfa_idx, 'ra_solved'],\n att_data.loc[gfa_idx, 'dec_solved'],\n )\n\n with open(att_dir / f'config{prefix}.yaml', 'w') as out:\n out.write(yaml.dump(log_config))\n\n att_data.to_hdf(att_dir / f'data{prefix}.out.h5', 'data')\n\n\nclass Simulation:\n \"\"\"Runs a simulation using multiprocessing.\n\n Parameters\n ----------\n fields : int or list\n Number of uniformly distributed fields to test or a list of field\n centres.\n output_dir : str\n The root of the directory structure where all the output files will\n be stored.\n observatory : str\n The observatory, either ``'apo'`` or ``'lco'``.\n config_file : str\n The path to the configuration file for the simulation.\n n_attempts : int\n Number of attempts, with randomised noise, to try per field.\n\n \"\"\"\n\n def __init__(self, fields, output_dir, observatory=None,\n config_file=None, n_attempts=10):\n\n self.output_dir = pathlib.Path(output_dir)\n\n if config_file:\n config_data = read_yaml_file(config_file)\n else:\n config_data = config\n\n self.config_data = config_data.copy()\n\n self.observatory = observatory or self.config_data['observatory']\n self.n_attempts = n_attempts\n\n numpy.random.seed(config_data['seed'])\n\n if 
isinstance(fields, int):\n fields = get_uniform_ra_dec(fields).tolist()\n self.fields = {fid + 1: list(map(float, fields[fid]))\n for fid in range(len(fields))}\n elif isinstance(fields, dict):\n self.fields = fields\n elif isinstance(fields, (tuple, list)):\n self.fields = {fid + 1: list(map(float, fields[fid]))\n for fid in range(len(fields))}\n\n self._data = None\n\n @classmethod\n def from_simulation(cls, path, *args, fields=None, **kwargs):\n \"\"\"Loads fields and data from a different simulation.\"\"\"\n\n path = pathlib.Path(path)\n config_path = path / 'config.yaml'\n\n if fields is None:\n config = yaml.safe_load(open(config_path))\n fields = config['fields']\n\n data = {field_id: (path / f'{field_id:05d}' / f'data_{field_id:05d}.h5')\n for field_id in fields}\n\n obj = cls(fields, *args, **kwargs)\n obj._data = data\n\n return obj\n\n def run(self, n_cpus=None, overwrite=False):\n \"\"\"Run the simulation.\n\n Parameters\n ----------\n n_cpus : int\n Number of CPUs to use. If not defined, uses all the CPUs.\n\n \"\"\"\n\n self.config_data['fields'] = self.fields\n self.config_data['n_attempts'] = self.n_attempts\n self.config_data['observatory'] = self.observatory\n\n n_cpus = n_cpus or multiprocessing.cpu_count()\n\n self.output_dir.mkdir(parents=True, exist_ok=True)\n\n config_path = self.output_dir / 'config.yaml'\n if config_path.exists() and not overwrite:\n raise RuntimeError(f'{config_path!s} already exists.')\n\n with open(config_path, 'w') as out:\n out.write(yaml.dump(self.config_data))\n\n f = functools.partial(_do_one_field, self.fields,\n self.config_data, self.observatory,\n self.output_dir, self.n_attempts,\n data=self._data, overwrite=overwrite)\n\n with tqdm.tqdm(total=len(self.fields)) as pbar:\n with multiprocessing.Pool(processes=n_cpus) as pool:\n for __ in pool.imap(f, self.fields.keys()):\n pbar.update()\n\n\ndef collate_results(path, show_progress=False):\n \"\"\"Collates the results of a simulation.\n\n Parameters\n ----------\n path : str\n The path to a completed simulation.\n show_progress : bool\n Whether to show a progress bar.\n\n Returns\n -------\n dict\n A dictionary with the collated results of the simulation.\n\n \"\"\"\n\n Row = collections.namedtuple('Row', ('observatory', 'field',\n 'attempt', 'gfa',\n 'field_ra', 'field_dec',\n 'fwhm', 'n_stars', 'n_detected',\n 'min_mag', 'max_mag',\n 'solved', 'solve_time_avg',\n 'rot', 'true_rot', 'rmse'))\n\n path = pathlib.Path(path)\n\n config = yaml.safe_load(open(path / 'config.yaml'))\n\n obs = config['observatory']\n gfa_rot = config[obs]['gfa_rot']\n\n fields = config['fields']\n n_attempts = config.get('n_attempts') or config.get('n_attamps')\n mag_column = config['mag_column']\n\n rows = [None] * len(fields) * n_attempts * len(gfa_rot)\n\n if show_progress:\n pbar = tqdm.tqdm(total=len(fields))\n else:\n pbar = None\n\n n = 0\n\n for field_id in fields:\n field_str = f'{field_id:05d}'\n\n for att in range(1, n_attempts + 1):\n\n att_str = f'{att:03d}'\n prefix = f'{field_str}_{att_str}'\n\n att_path = path / field_str / att_str\n att_config = yaml.safe_load(open(att_path / f'config_{prefix}.yaml'))\n\n data_out = pandas.read_hdf(att_path / f'data_{prefix}.out.h5')\n solve_time_avg = (att_config['output']['solve_field_time'] /\n len(att_config['input']['gfa_centres']))\n\n for gfa in att_config['input']['gfa_centres']:\n\n gfa_data = data_out.loc[data_out.gfa == gfa]\n\n solved = att_config['output']['solved'][gfa]\n\n if solved:\n\n with warnings.catch_warnings():\n 
warnings.simplefilter('ignore')\n wcs = astropy.wcs.WCS(str(att_path /\n f'gfa{gfa}_{prefix}.wcs'))\n rot = get_wcs_rotation(wcs)\n rot = -rot % 360\n\n sep = gfa_data.separation\n rmse = numpy.sqrt(numpy.sum((sep * 3600.)**2) / len(sep))\n\n else:\n\n rot = None\n rmse = None\n\n row = Row(\n observatory=obs,\n field=field_id,\n attempt=att,\n gfa=gfa,\n field_ra=fields[field_id][0],\n field_dec=fields[field_id][1],\n fwhm=att_config['input']['fwhm'],\n n_stars=att_config['input']['n_stars_per_gfa'][gfa],\n n_detected=att_config['input']['n_detected_per_gfa'][gfa],\n min_mag=gfa_data[mag_column].min(),\n max_mag=gfa_data[mag_column].max(),\n solved=solved,\n solve_time_avg=solve_time_avg,\n rot=rot,\n true_rot=gfa_rot[gfa],\n rmse=rmse)\n\n rows[n] = row\n n += 1\n\n if pbar:\n pbar.update()\n\n return pandas.DataFrame(rows[0:n]).set_index(['field', 'attempt', 'gfa'])\n"
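The add_noise() docstring above uses the standard FWHM-to-sigma conversion for a Gaussian, sigma = FWHM / (2 * sqrt(2 * ln 2)). A self-contained restatement of just that centroiding-noise step (add_centroid_noise is a hypothetical helper; pixel units assumed):

import numpy as np


def add_centroid_noise(x, y, fwhm, seed=None):
    """Perturb pixel centroids with Gaussian seeing noise of the given FWHM."""
    # FWHM-to-sigma conversion for a Gaussian profile.
    sigma = fwhm / (2.0 * np.sqrt(2.0 * np.log(2.0)))
    rng = np.random.default_rng(seed)
    # Independent noise on each axis, matching the input shape.
    return (x + rng.normal(0.0, sigma, np.shape(x)),
            y + rng.normal(0.0, sigma, np.shape(y)))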
] | [
[
"pandas.read_hdf",
"numpy.radians",
"numpy.log",
"numpy.random.seed",
"numpy.arcsin",
"numpy.cos",
"pandas.DataFrame",
"numpy.sin",
"numpy.random.normal",
"numpy.log10",
"numpy.random.uniform",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
anjlip/pymatgen | [
"5cc42912a12a265a603df7e34c856561f76edc1f",
"62ecae1c7382a41861e3a5d9b9c8dd1207472409",
"62ecae1c7382a41861e3a5d9b9c8dd1207472409",
"62ecae1c7382a41861e3a5d9b9c8dd1207472409"
] | [
"pymatgen/analysis/graphs.py",
"pymatgen/io/vasp/inputs.py",
"pymatgen/analysis/elasticity/strain.py",
"pymatgen/io/feff/inputs.py"
] | [
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport warnings\nimport subprocess\nimport numpy as np\nimport os.path\nimport copy\nfrom itertools import combinations\n\nfrom pymatgen.core import Structure, Lattice, PeriodicSite, Molecule\nfrom pymatgen.core.structure import FunctionalGroups\nfrom pymatgen.util.coord import lattice_points_in_supercell\nfrom pymatgen.vis.structure_vtk import EL_COLORS\n\nfrom monty.json import MSONable\nfrom monty.os.path import which\nfrom operator import itemgetter\nfrom collections import namedtuple, defaultdict\nfrom scipy.spatial import KDTree\nfrom scipy.stats import describe\n\nimport networkx as nx\nimport networkx.algorithms.isomorphism as iso\nfrom networkx.readwrite import json_graph\nfrom networkx.drawing.nx_agraph import write_dot\n\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n__author__ = \"Matthew Horton, Evan Spotte-Smith\"\n__version__ = \"0.1\"\n__maintainer__ = \"Matthew Horton\"\n__email__ = \"[email protected]\"\n__status__ = \"Beta\"\n__date__ = \"August 2017\"\n\nConnectedSite = namedtuple('ConnectedSite', 'site, jimage, index, weight, dist')\n\n\nclass StructureGraph(MSONable):\n \"\"\"\n This is a class for annotating a Structure with\n bond information, stored in the form of a graph. A \"bond\" does\n not necessarily have to be a chemical bond, but can store any\n kind of information that connects two Sites.\n \"\"\"\n\n def __init__(self, structure, graph_data=None):\n \"\"\"\n If constructing this class manually, use the `with_empty_graph`\n method or `with_local_env_strategy` method (using an algorithm\n provided by the `local_env` module, such as O'Keeffe).\n\n This class that contains connection information:\n relationships between sites represented by a Graph structure,\n and an associated structure object.\n\n This class uses the NetworkX package to store and operate\n on the graph itself, but contains a lot of helper methods\n to make associating a graph with a given crystallographic\n structure easier.\n\n Use cases for this include storing bonding information,\n NMR J-couplings, Heisenberg exchange parameters, etc.\n\n For periodic graphs, class stores information on the graph\n edges of what lattice image the edge belongs to.\n\n :param structure: a Structure object\n\n :param graph_data: dict containing graph information in\n dict format (not intended to be constructed manually,\n see as_dict method for format)\n \"\"\"\n\n if isinstance(structure, StructureGraph):\n # just make a copy from input\n graph_data = structure.as_dict()['graphs']\n\n self.structure = structure\n self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)\n\n # tidy up edge attr dicts, reading to/from json duplicates\n # information\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if 'id' in d:\n del d['id']\n if 'key' in d:\n del d['key']\n # ensure images are tuples (conversion to lists happens\n # when serializing back from json), it's important images\n # are hashable/immutable\n if 'to_jimage' in d:\n d['to_jimage'] = tuple(d['to_jimage'])\n if 'from_jimage' in d:\n d['from_jimage'] = tuple(d['from_jimage'])\n\n @classmethod\n def with_empty_graph(cls, structure, name=\"bonds\",\n edge_weight_name=None,\n edge_weight_units=None):\n \"\"\"\n Constructor for StructureGraph, returns a StructureGraph\n object with an empty graph (no edges, only nodes defined\n that correspond to Sites in Structure).\n\n :param 
structure (Structure):\n :param name (str): name of graph, e.g. \"bonds\"\n :param edge_weight_name (str): name of edge weights,\n e.g. \"bond_length\" or \"exchange_constant\"\n :param edge_weight_units (str): name of edge weight units\n e.g. \"Å\" or \"eV\"\n :return (StructureGraph):\n \"\"\"\n\n if edge_weight_name and (edge_weight_units is None):\n raise ValueError(\"Please specify units associated \"\n \"with your edge weights. Can be \"\n \"empty string if arbitrary or \"\n \"dimensionless.\")\n\n # construct graph with one node per site\n # graph attributes don't change behavior of graph,\n # they're just for book-keeping\n graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,\n edge_weight_units=edge_weight_units,\n name=name)\n graph.add_nodes_from(range(len(structure)))\n\n graph_data = json_graph.adjacency_data(graph)\n\n return cls(structure, graph_data=graph_data)\n\n @staticmethod\n def with_edges(structure, edges):\n \"\"\"\n Constructor for MoleculeGraph, using pre-existing or pre-defined edges\n with optional edge parameters.\n\n :param molecule: Molecule object\n :param edges: dict representing the bonds of the functional\n group (format: {(from_index, to_index, from_image, to_image): props},\n where props is a dictionary of properties, including weight.\n Props should be None if no additional properties are to be\n specified.\n :return: sg, a StructureGraph\n \"\"\"\n\n sg = StructureGraph.with_empty_graph(structure, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n for edge, props in edges.items():\n\n try:\n from_index = edge[0]\n to_index = edge[1]\n from_image = edge[2]\n to_image = edge[3]\n except TypeError:\n raise ValueError(\"Edges must be given as (from_index, to_index,\"\n \" from_image, to_image) tuples\")\n\n if props is not None:\n if \"weight\" in props.keys():\n weight = props[\"weight\"]\n del props[\"weight\"]\n else:\n weight = None\n\n if len(props.items()) == 0:\n props = None\n else:\n weight = None\n\n nodes = sg.graph.nodes\n if not (from_index in nodes and to_index in nodes):\n raise ValueError(\"Edges cannot be added if nodes are not\"\n \" present in the graph. 
Please check your\"\n \" indices.\")\n\n sg.add_edge(from_index, to_index, from_jimage=from_image,\n to_jimage=to_image, weight=weight,\n edge_properties=props)\n\n sg.set_node_attributes()\n return sg\n\n @staticmethod\n def with_local_env_strategy(structure, strategy):\n \"\"\"\n Constructor for StructureGraph, using a strategy\n from :Class: `pymatgen.analysis.local_env`.\n\n :param structure: Structure object\n :param strategy: an instance of a\n :Class: `pymatgen.analysis.local_env.NearNeighbors` object\n :return:\n \"\"\"\n\n sg = StructureGraph.with_empty_graph(structure, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n for n, neighbors in enumerate(strategy.get_all_nn_info(structure)):\n for neighbor in neighbors:\n\n # local_env will always try to add two edges\n # for any one bond, one from site u to site v\n # and another form site v to site u: this is\n # harmless, so warn_duplicates=False\n sg.add_edge(from_index=n,\n from_jimage=(0, 0, 0),\n to_index=neighbor['site_index'],\n to_jimage=neighbor['image'],\n weight=neighbor['weight'],\n warn_duplicates=False)\n\n return sg\n\n @property\n def name(self):\n \"\"\"\n :return: Name of graph\n \"\"\"\n return self.graph.graph['name']\n\n @property\n def edge_weight_name(self):\n \"\"\"\n :return: Name of the edge weight property of graph\n \"\"\"\n return self.graph.graph['edge_weight_name']\n\n @property\n def edge_weight_unit(self):\n \"\"\"\n :return: Units of the edge weight property of graph\n \"\"\"\n return self.graph.graph['edge_weight_units']\n\n def add_edge(self, from_index, to_index,\n from_jimage=(0, 0, 0), to_jimage=None,\n weight=None, warn_duplicates=True,\n edge_properties=None):\n \"\"\"\n Add edge to graph.\n\n Since physically a 'bond' (or other connection\n between sites) doesn't have a direction, from_index,\n from_jimage can be swapped with to_index, to_jimage.\n\n However, images will always always be shifted so that\n from_index < to_index and from_jimage becomes (0, 0, 0).\n\n :param from_index: index of site connecting from\n :param to_index: index of site connecting to\n :param from_jimage (tuple of ints): lattice vector of periodic\n image, e.g. (1, 0, 0) for periodic image in +x direction\n :param to_jimage (tuple of ints): lattice vector of image\n :param weight (float): e.g. 
bond length\n :param warn_duplicates (bool): if True, will warn if\n trying to add duplicate edges (duplicate edges will not\n be added in either case)\n :param edge_properties (dict): any other information to\n store on graph edges, similar to Structure's site_properties\n :return:\n \"\"\"\n\n # this is not necessary for the class to work, but\n # just makes it neater\n if to_index < from_index:\n to_index, from_index = from_index, to_index\n to_jimage, from_jimage = from_jimage, to_jimage\n\n # constrain all from_jimages to be (0, 0, 0),\n # initial version of this class worked even if\n # from_jimage != (0, 0, 0), but making this\n # assumption simplifies logic later\n if not np.array_equal(from_jimage, (0, 0, 0)):\n shift = from_jimage\n from_jimage = np.subtract(from_jimage, shift)\n to_jimage = np.subtract(to_jimage, shift)\n\n # automatic detection of to_jimage if user doesn't specify\n # will try and detect all equivalent images and add multiple\n # edges if appropriate\n if to_jimage is None:\n # assume we want the closest site\n warnings.warn(\"Please specify to_jimage to be unambiguous, \"\n \"trying to automatically detect.\")\n dist, to_jimage = self.structure[from_index]\\\n .distance_and_image(self.structure[to_index])\n if dist == 0:\n # this will happen when from_index == to_index,\n # typically in primitive single-atom lattices\n images = [1, 0, 0], [0, 1, 0], [0, 0, 1]\n dists = []\n for image in images:\n dists.append(self.structure[from_index]\n .distance_and_image(self.structure[from_index],\n jimage=image)[0])\n dist = min(dists)\n equiv_sites = self.structure.get_neighbors_in_shell(self.structure[from_index].coords,\n dist,\n dist*0.01,\n include_index=True)\n for site, dist, to_index in equiv_sites:\n to_jimage = np.subtract(site.frac_coords, self.structure[from_index].frac_coords)\n to_jimage = to_jimage.astype(int)\n self.add_edge(from_index=from_index, from_jimage=(0, 0, 0),\n to_jimage=to_jimage, to_index=to_index)\n return\n\n # sanitize types\n from_jimage, to_jimage = tuple(map(int, from_jimage)), tuple(map(int, to_jimage))\n from_index, to_index = int(from_index), int(to_index)\n\n # check we're not trying to add a duplicate edge\n # there should only ever be at most one edge\n # between a given (site, jimage) pair and another\n # (site, jimage) pair\n existing_edge_data = self.graph.get_edge_data(from_index, to_index)\n if existing_edge_data:\n for key, d in existing_edge_data.items():\n if d[\"to_jimage\"] == to_jimage:\n if warn_duplicates:\n warnings.warn(\"Trying to add an edge that already exists from \"\n \"site {} to site {} in {}.\".format(from_index,\n to_index,\n to_jimage))\n return\n\n # generic container for additional edge properties,\n # similar to site properties\n edge_properties = edge_properties or {}\n\n if weight:\n self.graph.add_edge(from_index, to_index,\n to_jimage=to_jimage,\n weight=weight,\n **edge_properties)\n else:\n self.graph.add_edge(from_index, to_index,\n to_jimage=to_jimage,\n **edge_properties)\n\n def insert_node(self, i, species, coords, coords_are_cartesian=False,\n validate_proximity=False, site_properties=None, edges=None):\n \"\"\"\n A wrapper around Molecule.insert(), which also incorporates the new\n site into the MoleculeGraph.\n\n :param i: Index at which to insert the new site\n :param species: Species for the new site\n :param coords: 3x1 array representing coordinates of the new site\n :param coords_are_cartesian: Whether coordinates are cartesian.\n Defaults to False.\n :param validate_proximity: For 
Molecule.insert(); if True (default\n False), distance will be checked to ensure that site can be safely\n added.\n :param site_properties: Site properties for Molecule\n :param edges: List of dicts representing edges to be added to the\n MoleculeGraph. These edges must include the index of the new site i,\n and all indices used for these edges should reflect the\n MoleculeGraph AFTER the insertion, NOT before. Each dict should at\n least have a \"to_index\" and \"from_index\" key, and can also have a\n \"weight\" and a \"properties\" key.\n :return:\n \"\"\"\n\n self.structure.insert(i, species, coords,\n coords_are_cartesian=coords_are_cartesian,\n validate_proximity=validate_proximity,\n properties=site_properties)\n\n mapping = {}\n for j in range(len(self.structure) - 1):\n if j < i:\n mapping[j] = j\n else:\n mapping[j] = j + 1\n nx.relabel_nodes(self.graph, mapping, copy=False)\n\n self.graph.add_node(i)\n self.set_node_attributes()\n\n if edges is not None:\n for edge in edges:\n try:\n self.add_edge(edge[\"from_index\"], edge[\"to_index\"],\n from_jimage=(0, 0, 0),\n to_jimage=edge[\"to_jimage\"],\n weight=edge.get(\"weight\", None),\n edge_properties=edge.get(\"properties\", None))\n except KeyError:\n raise RuntimeError(\"Some edges are invalid.\")\n\n def set_node_attributes(self):\n \"\"\"\n Gives each node a \"specie\" and a \"coords\" attribute, updated with the\n current species and coordinates.\n\n :return:\n \"\"\"\n\n species = {}\n coords = {}\n properties = {}\n for node in self.graph.nodes():\n species[node] = self.structure[node].specie.symbol\n coords[node] = self.structure[node].coords\n properties[node] = self.structure[node].properties\n\n nx.set_node_attributes(self.graph, species, \"specie\")\n nx.set_node_attributes(self.graph, coords, \"coords\")\n nx.set_node_attributes(self.graph, properties, \"properties\")\n\n def alter_edge(self, from_index, to_index, to_jimage=None,\n new_weight=None, new_edge_properties=None):\n \"\"\"\n Alters either the weight or the edge_properties of\n an edge in the StructureGraph.\n\n :param from_index: int\n :param to_index: int\n :param to_jimage: tuple\n :param new_weight: alter_edge does not require\n that weight be altered. As such, by default, this\n is None. If weight is to be changed, it should be a\n float.\n :param new_edge_properties: alter_edge does not require\n that edge_properties be altered. As such, by default,\n this is None. If any edge properties are to be changed,\n it should be a dictionary of edge properties to be changed.\n :return:\n \"\"\"\n\n existing_edges = self.graph.get_edge_data(from_index, to_index)\n\n # ensure that edge exists before attempting to change it\n if not existing_edges:\n raise ValueError(\"Edge between {} and {} cannot be altered;\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n if to_jimage is None:\n edge_index = 0\n else:\n for i, properties in existing_edges.items():\n if properties[\"to_jimage\"] == to_jimage:\n edge_index = i\n\n if new_weight is not None:\n self.graph[from_index][to_index][edge_index]['weight'] = new_weight\n\n if new_edge_properties is not None:\n for prop in list(new_edge_properties.keys()):\n self.graph[from_index][to_index][edge_index][prop] = new_edge_properties[prop]\n\n def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):\n \"\"\"\n Remove an edge from the StructureGraph. 
If no image is given, this method will fail.\n\n :param from_index: int\n :param to_index: int\n :param to_jimage: tuple\n :param allow_reverse: If allow_reverse is True, then break_edge will\n attempt to break both (from_index, to_index) and, failing that,\n will attempt to break (to_index, from_index).\n :return:\n \"\"\"\n\n # ensure that edge exists before attempting to remove it\n existing_edges = self.graph.get_edge_data(from_index, to_index)\n existing_reverse = None\n\n if to_jimage is None:\n raise ValueError(\"Image must be supplied, to avoid ambiguity.\")\n\n if existing_edges:\n for i, properties in existing_edges.items():\n if properties[\"to_jimage\"] == to_jimage:\n edge_index = i\n\n self.graph.remove_edge(from_index, to_index, edge_index)\n\n else:\n if allow_reverse:\n existing_reverse = self.graph.get_edge_data(to_index, from_index)\n\n if existing_reverse:\n for i, properties in existing_reverse.items():\n if properties[\"to_jimage\"] == to_jimage:\n edge_index = i\n\n self.graph.remove_edge(to_index, from_index, edge_index)\n else:\n raise ValueError(\"Edge cannot be broken between {} and {};\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n def remove_nodes(self, indices):\n \"\"\"\n A wrapper for Molecule.remove_sites().\n\n :param indices: list of indices in the current Molecule (and graph) to\n be removed.\n :return:\n \"\"\"\n\n self.structure.remove_sites(indices)\n self.graph.remove_nodes_from(indices)\n\n mapping = {}\n for correct, current in enumerate(sorted(self.graph.nodes)):\n mapping[current] = correct\n\n nx.relabel_nodes(self.graph, mapping, copy=False)\n self.set_node_attributes()\n\n def substitute_group(self, index, func_grp, strategy, bond_order=1,\n graph_dict=None, strategy_params=None):\n \"\"\"\n Builds off of Structure.substitute to replace an atom in self.structure\n with a functional group. This method also amends self.graph to\n incorporate the new functional group.\n\n NOTE: Care must be taken to ensure that the functional group that is\n substituted will not place atoms to close to each other, or violate the\n dimensions of the Lattice.\n\n :param index: Index of atom to substitute.\n :param func_grp: Substituent molecule. There are two options:\n\n 1. Providing an actual Molecule as the input. The first atom\n must be a DummySpecie X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n :param strategy: Class from pymatgen.analysis.local_env.\n :param bond_order: A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. Defaults to 1.\n :param graph_dict: Dictionary representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. 
If None, then the algorithm\n will attempt to automatically determine bonds using one of\n a list of strategies defined in pymatgen.analysis.local_env.\n :param strategy_params: dictionary of keyword arguments for strategy.\n If None, default parameters will be used.\n :return:\n \"\"\"\n\n def map_indices(grp):\n grp_map = {}\n\n # Get indices now occupied by functional group\n # Subtracting 1 because the dummy atom X should not count\n atoms = len(grp) - 1\n offset = len(self.structure) - atoms\n\n for i in range(atoms):\n grp_map[i] = i + offset\n\n return grp_map\n\n if isinstance(func_grp, Molecule):\n func_grp = copy.deepcopy(func_grp)\n else:\n try:\n func_grp = copy.deepcopy(FunctionalGroups[func_grp])\n except:\n raise RuntimeError(\"Can't find functional group in list. \"\n \"Provide explicit coordinate instead\")\n\n self.structure.substitute(index, func_grp, bond_order=bond_order)\n\n mapping = map_indices(func_grp)\n\n # Remove dummy atom \"X\"\n func_grp.remove_species(\"X\")\n\n if graph_dict is not None:\n for (u, v) in graph_dict.keys():\n edge_props = graph_dict[(u, v)]\n if \"to_jimage\" in edge_props.keys():\n to_jimage = edge_props[\"to_jimage\"]\n del edge_props[\"to_jimage\"]\n else:\n # By default, assume that all edges should stay remain\n # inside the initial image\n to_jimage = (0, 0, 0)\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n self.add_edge(mapping[u], mapping[v], to_jimage=to_jimage,\n weight=weight, edge_properties=edge_props)\n\n else:\n if strategy_params is None:\n strategy_params = {}\n strat = strategy(**strategy_params)\n\n for site in mapping.values():\n neighbors = strat.get_nn_info(self.structure, site)\n\n for neighbor in neighbors:\n self.add_edge(from_index=site,\n from_jimage=(0, 0, 0),\n to_index=neighbor['site_index'],\n to_jimage=neighbor['image'],\n weight=neighbor['weight'],\n warn_duplicates=False)\n\n def get_connected_sites(self, n, jimage=(0, 0, 0)):\n \"\"\"\n Returns a named tuple of neighbors of site n:\n periodic_site, jimage, index, weight.\n Index is the index of the corresponding site\n in the original structure, weight can be\n None if not defined.\n :param n: index of Site in Structure\n :param jimage: lattice vector of site\n :return: list of ConnectedSite tuples,\n sorted by closest first\n \"\"\"\n\n connected_sites = set()\n connected_site_images = set()\n\n out_edges = [(u, v, d, 'out') for u, v, d in self.graph.out_edges(n, data=True)]\n in_edges = [(u, v, d, 'in') for u, v, d in self.graph.in_edges(n, data=True)]\n\n for u, v, d, dir in out_edges + in_edges:\n\n to_jimage = d['to_jimage']\n\n if dir == 'in':\n u, v = v, u\n to_jimage = np.multiply(-1, to_jimage)\n\n to_jimage = tuple(map(int, np.add(to_jimage, jimage)))\n site_d = self.structure[v].as_dict()\n site_d['abc'] = np.add(site_d['abc'], to_jimage).tolist()\n site = PeriodicSite.from_dict(site_d)\n\n # from_site if jimage arg != (0, 0, 0)\n relative_jimage = np.subtract(to_jimage, jimage)\n dist = self.structure[u].distance(self.structure[v], jimage=relative_jimage)\n\n weight = d.get('weight', None)\n\n if (v, to_jimage) not in connected_site_images:\n\n connected_site = ConnectedSite(site=site,\n jimage=to_jimage,\n index=v,\n weight=weight,\n dist=dist)\n\n connected_sites.add(connected_site)\n connected_site_images.add((v, to_jimage))\n\n # return list sorted by closest sites first\n connected_sites = list(connected_sites)\n connected_sites.sort(key=lambda x: x.dist)\n\n return connected_sites\n\n 
def get_coordination_of_site(self, n):\n \"\"\"\n Returns the number of neighbors of site n.\n In graph terms, simply returns degree\n of node corresponding to site n.\n :param n: index of site\n :return (int):\n \"\"\"\n number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])\n return self.graph.degree(n) - number_of_self_loops\n\n def draw_graph_to_file(self, filename=\"graph\",\n diff=None,\n hide_unconnected_nodes=False,\n hide_image_edges=True,\n edge_colors=False,\n node_labels=False,\n weight_labels=False,\n image_labels=False,\n color_scheme=\"VESTA\",\n keep_dot=False,\n algo=\"fdp\"):\n \"\"\"\n Draws graph using GraphViz.\n\n The networkx graph object itself can also be drawn\n with networkx's in-built graph drawing methods, but\n note that this might give misleading results for\n multigraphs (edges are super-imposed on each other).\n\n If visualization is difficult to interpret,\n `hide_image_edges` can help, especially in larger\n graphs.\n\n :param filename: filename to output, will detect filetype\n from extension (any graphviz filetype supported, such as\n pdf or png)\n :param diff (StructureGraph): an additional graph to\n compare with, will color edges red that do not exist in diff\n and edges green that are in diff graph but not in the\n reference graph\n :param hide_unconnected_nodes: if True, hide unconnected\n nodes\n :param hide_image_edges: if True, do not draw edges that\n go through periodic boundaries\n :param edge_colors (bool): if True, use node colors to\n color edges\n :param node_labels (bool): if True, label nodes with\n species and site index\n :param weight_labels (bool): if True, label edges with\n weights\n :param image_labels (bool): if True, label edges with\n their periodic images (usually only used for debugging,\n edges to periodic images always appear as dashed lines)\n :param color_scheme (str): \"VESTA\" or \"JMOL\"\n :param keep_dot (bool): keep GraphViz .dot file for later\n visualization\n :param algo: any graphviz algo, \"neato\" (for simple graphs)\n or \"fdp\" (for more crowded graphs) usually give good outputs\n :return:\n \"\"\"\n\n if not which(algo):\n raise RuntimeError(\"StructureGraph graph drawing requires \"\n \"GraphViz binaries to be in the path.\")\n\n # Developer note: NetworkX also has methods for drawing\n # graphs using matplotlib, these also work here. 
However,\n # a dedicated tool like GraphViz allows for much easier\n # control over graph appearance and also correctly displays\n # mutli-graphs (matplotlib can superimpose multiple edges).\n\n g = self.graph.copy()\n\n g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': \"false\"}\n\n # add display options for nodes\n for n in g.nodes():\n\n # get label by species name\n label = \"{}({})\".format(str(self.structure[n].specie), n) if node_labels else \"\"\n\n # use standard color scheme for nodes\n c = EL_COLORS[color_scheme].get(str(self.structure[n].specie.symbol), [0, 0, 0])\n\n # get contrasting font color\n # magic numbers account for perceived luminescence\n # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color\n fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587\n + c[2] * 0.114) / 255 < 0.5 else '#ffffff'\n\n # convert color to hex string\n color = \"#{:02x}{:02x}{:02x}\".format(c[0], c[1], c[2])\n\n g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,\n fontname=\"Helvetica-bold\", style=\"filled\", shape=\"circle\")\n\n edges_to_delete = []\n\n # add display options for edges\n for u, v, k, d in g.edges(keys=True, data=True):\n\n # retrieve from/to images, set as origin if not defined\n to_image = d['to_jimage']\n\n # set edge style\n d['style'] = \"solid\"\n if to_image != (0, 0, 0):\n d['style'] = \"dashed\"\n if hide_image_edges:\n edges_to_delete.append((u, v, k))\n\n # don't show edge directions\n d['arrowhead'] = \"none\"\n\n # only add labels for images that are not the origin\n if image_labels:\n d['headlabel'] = \"\" if to_image == (0, 0, 0) else \"to {}\".format((to_image))\n d['arrowhead'] = \"normal\" if d['headlabel'] else \"none\"\n\n # optionally color edges using node colors\n color_u = g.node[u]['fillcolor']\n color_v = g.node[v]['fillcolor']\n d['color_uv'] = \"{};0.5:{};0.5\".format(color_u, color_v) if edge_colors else \"#000000\"\n\n # optionally add weights to graph\n if weight_labels:\n units = g.graph.get('edge_weight_units', \"\")\n if d.get('weight'):\n d['label'] = \"{:.2f} {}\".format(d['weight'], units)\n\n # update edge with our new style attributes\n g.edges[u, v, k].update(d)\n\n # optionally remove periodic image edges,\n # these can be confusing due to periodic boundaries\n if hide_image_edges:\n for edge_to_delete in edges_to_delete:\n g.remove_edge(*edge_to_delete)\n\n # optionally hide unconnected nodes,\n # these can appear when removing periodic edges\n if hide_unconnected_nodes:\n g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])\n\n # optionally highlight differences with another graph\n if diff:\n diff = self.diff(diff, strict=True)\n green_edges = []\n red_edges = []\n for u, v, k, d in g.edges(keys=True, data=True):\n if (u, v, d['to_jimage']) in diff['self']:\n # edge has been deleted\n red_edges.append((u, v, k))\n elif (u, v, d['to_jimage']) in diff['other']:\n # edge has been added\n green_edges.append((u, v, k))\n for u, v, k in green_edges:\n g.edges[u, v, k].update({'color_uv': '#00ff00'})\n for u, v, k in red_edges:\n g.edges[u, v, k].update({'color_uv': '#ff0000'})\n\n basename, extension = os.path.splitext(filename)\n extension = extension[1:]\n\n write_dot(g, basename+\".dot\")\n\n with open(filename, \"w\") as f:\n\n args = [algo, \"-T\", extension, basename+\".dot\"]\n rs = subprocess.Popen(args,\n stdout=f,\n stdin=subprocess.PIPE, close_fds=True)\n rs.communicate()\n if rs.returncode != 0:\n raise RuntimeError(\"{} exited with return code 
{}.\".format(algo, rs.returncode))\n\n if not keep_dot:\n os.remove(basename+\".dot\")\n\n @property\n def types_and_weights_of_connections(self):\n \"\"\"\n Extract a dictionary summarizing the types and weights\n of edges in the graph.\n\n :return: A dictionary with keys specifying the\n species involved in a connection in alphabetical order\n (e.g. string 'Fe-O') and values which are a list of\n weights for those connections (e.g. bond lengths).\n \"\"\"\n def get_label(u, v):\n u_label = self.structure[u].species_string\n v_label = self.structure[v].species_string\n return \"-\".join(sorted((u_label, v_label)))\n\n types = defaultdict(list)\n for u, v, d in self.graph.edges(data=True):\n label = get_label(u, v)\n types[label].append(d['weight'])\n\n return dict(types)\n\n @property\n def weight_statistics(self):\n \"\"\"\n Extract a statistical summary of edge weights present in\n the graph.\n\n :return: A dict with an 'all_weights' list, 'minimum',\n 'maximum', 'median', 'mean', 'std_dev'\n \"\"\"\n\n all_weights = [d.get('weight', None) for u, v, d\n in self.graph.edges(data=True)]\n stats = describe(all_weights, nan_policy='omit')\n\n return {\n 'all_weights': all_weights,\n 'min': stats.minmax[0],\n 'max': stats.minmax[1],\n 'mean': stats.mean,\n 'variance': stats.variance\n }\n\n def types_of_coordination_environments(self, anonymous=False):\n \"\"\"\n Extract information on the different co-ordination environments\n present in the graph.\n\n :param anonymous: if anonymous, will replace specie names\n with A, B, C, etc.\n :return: a list of co-ordination environments,\n e.g. ['Mo-S(6)', 'S-Mo(3)']\n \"\"\"\n\n motifs = set()\n for idx, site in enumerate(self.structure):\n\n centre_sp = site.species_string\n\n connected_sites = self.get_connected_sites(idx)\n connected_species = [connected_site.site.species_string\n for connected_site in connected_sites]\n\n labels = []\n for sp in set(connected_species):\n count = connected_species.count(sp)\n labels.append((count, sp))\n\n labels = sorted(labels, reverse=True)\n\n if anonymous:\n mapping = {centre_sp: 'A'}\n available_letters = [chr(66+i) for i in range(25)]\n for label in labels:\n sp = label[1]\n if sp not in mapping:\n mapping[sp] = available_letters.pop(0)\n centre_sp = 'A'\n labels = [(label[0], mapping[label[1]]) for label in labels]\n\n labels = [\"{}({})\".format(label[1], label[0]) for label in labels]\n motif = '{}-{}'.format(centre_sp, ','.join(labels))\n motifs.add(motif)\n\n return sorted(list(motifs))\n\n def as_dict(self):\n \"\"\"\n As in :Class: `pymatgen.core.Structure` except\n with using `to_dict_of_dicts` from NetworkX\n to store graph information.\n \"\"\"\n\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": self.structure.as_dict(),\n \"graphs\": json_graph.adjacency_data(self.graph)}\n\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n As in :Class: `pymatgen.core.Structure` except\n restoring graphs using `from_dict_of_dicts`\n from NetworkX to restore graph information.\n \"\"\"\n s = Structure.from_dict(d['structure'])\n return cls(s, d['graphs'])\n\n def __mul__(self, scaling_matrix):\n \"\"\"\n Replicates the graph, creating a supercell,\n intelligently joining together\n edges that lie on periodic boundaries.\n In principle, any operations on the expanded\n graph could also be done on the original\n graph, but a larger graph can be easier to\n visualize and reason about.\n :param scaling_matrix: same as Structure.__mul__\n :return:\n 
\"\"\"\n\n # Developer note: a different approach was also trialed, using\n # a simple Graph (instead of MultiDiGraph), with node indices\n # representing both site index and periodic image. Here, the\n # number of nodes != number of sites in the Structure. This\n # approach has many benefits, but made it more difficult to\n # keep the graph in sync with its corresponding Structure.\n\n # Broadly, it would be easier to multiply the Structure\n # *before* generating the StructureGraph, but this isn't\n # possible when generating the graph using critic2 from\n # charge density.\n\n # Multiplication works by looking for the expected position\n # of an image node, and seeing if that node exists in the\n # supercell. If it does, the edge is updated. This is more\n # computationally expensive than just keeping track of the\n # which new lattice images present, but should hopefully be\n # easier to extend to a general 3x3 scaling matrix.\n\n # code adapted from Structure.__mul__\n scale_matrix = np.array(scaling_matrix, np.int16)\n if scale_matrix.shape != (3, 3):\n scale_matrix = np.array(scale_matrix * np.eye(3), np.int16)\n else:\n # TODO: test __mul__ with full 3x3 scaling matrices\n raise NotImplementedError('Not tested with 3x3 scaling matrices yet.')\n new_lattice = Lattice(np.dot(scale_matrix, self.structure.lattice.matrix))\n\n f_lat = lattice_points_in_supercell(scale_matrix)\n c_lat = new_lattice.get_cartesian_coords(f_lat)\n\n new_sites = []\n new_graphs = []\n\n for v in c_lat:\n\n # create a map of nodes from original graph to its image\n mapping = {n: n + len(new_sites) for n in range(len(self.structure))}\n\n for idx, site in enumerate(self.structure):\n\n s = PeriodicSite(site.species, site.coords + v,\n new_lattice, properties=site.properties,\n coords_are_cartesian=True, to_unit_cell=False)\n\n new_sites.append(s)\n\n new_graphs.append(nx.relabel_nodes(self.graph, mapping, copy=True))\n\n new_structure = Structure.from_sites(new_sites)\n\n # merge all graphs into one big graph\n new_g = nx.MultiDiGraph()\n for new_graph in new_graphs:\n new_g = nx.union(new_g, new_graph)\n\n edges_to_remove = [] # tuple of (u, v, k)\n edges_to_add = [] # tuple of (u, v, attr_dict)\n\n # list of new edges inside supercell\n # for duplicate checking\n edges_inside_supercell = [{u, v} for u, v, d in new_g.edges(data=True)\n if d['to_jimage'] == (0, 0, 0)]\n new_periodic_images = []\n\n orig_lattice = self.structure.lattice\n\n # use k-d tree to match given position to an\n # existing Site in Structure\n kd_tree = KDTree(new_structure.cart_coords)\n\n # tolerance in Å for sites to be considered equal\n # this could probably be a lot smaller\n tol = 0.05\n\n for u, v, k, d in new_g.edges(keys=True, data=True):\n\n to_jimage = d['to_jimage'] # for node v\n\n # reduce unnecessary checking\n if to_jimage != (0, 0, 0):\n\n # get index in original site\n n_u = u % len(self.structure)\n n_v = v % len(self.structure)\n\n # get fractional co-ordinates of where atoms defined\n # by edge are expected to be, relative to original\n # lattice (keeping original lattice has\n # significant benefits)\n v_image_frac = np.add(self.structure[n_v].frac_coords, to_jimage)\n u_frac = self.structure[n_u].frac_coords\n\n # using the position of node u as a reference,\n # get relative Cartesian co-ordinates of where\n # atoms defined by edge are expected to be\n v_image_cart = orig_lattice.get_cartesian_coords(v_image_frac)\n u_cart = orig_lattice.get_cartesian_coords(u_frac)\n v_rel = np.subtract(v_image_cart, u_cart)\n\n 
# now retrieve position of node v in\n # new supercell, and get absolute Cartesian\n # co-ordinates of where atoms defined by edge\n # are expected to be\n v_expec = new_structure[u].coords + v_rel\n\n # now search in new structure for these atoms\n # query returns (distance, index)\n v_present = kd_tree.query(v_expec)\n v_present = v_present[1] if v_present[0] <= tol else None\n\n # check if image sites now present in supercell\n # and if so, delete old edge that went through\n # periodic boundary\n if v_present is not None:\n\n new_u = u\n new_v = v_present\n new_d = d.copy()\n\n # node now inside supercell\n new_d['to_jimage'] = (0, 0, 0)\n\n edges_to_remove.append((u, v, k))\n\n # make sure we don't try to add duplicate edges\n # will remove two edges for every one we add\n if {new_u, new_v} not in edges_inside_supercell:\n\n # normalize direction\n if new_v < new_u:\n new_u, new_v = new_v, new_u\n\n edges_inside_supercell.append({new_u, new_v})\n edges_to_add.append((new_u, new_v, new_d))\n\n else:\n\n # want to find new_v such that we have\n # full periodic boundary conditions\n # so that nodes on one side of supercell\n # are connected to nodes on opposite side\n\n v_expec_frac = new_structure.lattice.get_fractional_coords(v_expec)\n\n # find new to_jimage\n # use np.around to fix issues with finite precision leading to incorrect image\n v_expec_image = np.around(v_expec_frac, decimals=3)\n v_expec_image = v_expec_image - v_expec_image%1\n\n v_expec_frac = np.subtract(v_expec_frac, v_expec_image)\n v_expec = new_structure.lattice.get_cartesian_coords(v_expec_frac)\n v_present = kd_tree.query(v_expec)\n v_present = v_present[1] if v_present[0] <= tol else None\n\n if v_present is not None:\n\n new_u = u\n new_v = v_present\n new_d = d.copy()\n new_to_jimage = tuple(map(int, v_expec_image))\n\n # normalize direction\n if new_v < new_u:\n new_u, new_v = new_v, new_u\n new_to_jimage = tuple(np.multiply(-1, d['to_jimage']).astype(int))\n\n new_d['to_jimage'] = new_to_jimage\n\n edges_to_remove.append((u, v, k))\n\n if (new_u, new_v, new_to_jimage) not in new_periodic_images:\n edges_to_add.append((new_u, new_v, new_d))\n new_periodic_images.append((new_u, new_v, new_to_jimage))\n\n logger.debug(\"Removing {} edges, adding {} new edges.\".format(len(edges_to_remove),\n len(edges_to_add)))\n\n # add/delete marked edges\n for edge_to_remove in edges_to_remove:\n new_g.remove_edge(*edge_to_remove)\n for (u, v, d) in edges_to_add:\n new_g.add_edge(u, v, **d)\n\n # return new instance of StructureGraph with supercell\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": new_structure.as_dict(),\n \"graphs\": json_graph.adjacency_data(new_g)}\n\n sg = StructureGraph.from_dict(d)\n\n return sg\n\n def __rmul__(self, other):\n return self.__mul__(other)\n\n def _edges_to_string(self, g):\n\n header = \"from to to_image \"\n header_line = \"---- ---- ------------\"\n edge_weight_name = g.graph[\"edge_weight_name\"]\n if edge_weight_name:\n print_weights = [\"weight\"]\n edge_label = g.graph[\"edge_weight_name\"]\n edge_weight_units = g.graph[\"edge_weight_units\"]\n if edge_weight_units:\n edge_label += \" ({})\".format(edge_weight_units)\n header += \" {}\".format(edge_label)\n header_line += \" {}\".format(\"-\"*max([18, len(edge_label)]))\n else:\n print_weights = False\n\n s = header + \"\\n\" + header_line + \"\\n\"\n\n edges = list(g.edges(data=True))\n\n # sort edges for consistent ordering\n edges.sort(key=itemgetter(0,1))\n\n if 
print_weights:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12} {:.3e}\\n\".format(u, v, str(data.get(\"to_jimage\", (0, 0, 0))),\n data.get(\"weight\", 0))\n else:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12}\\n\".format(u, v,\n str(data.get(\"to_jimage\", (0, 0, 0))))\n\n return s\n\n def __str__(self):\n s = \"Structure Graph\"\n s += \"\\nStructure: \\n{}\".format(self.structure.__str__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __repr__(self):\n s = \"Structure Graph\"\n s += \"\\nStructure: \\n{}\".format(self.structure.__repr__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __len__(self):\n \"\"\"\n :return: length of Structure / number of nodes in graph\n \"\"\"\n return len(self.structure)\n\n def sort(self, key=None, reverse=False):\n \"\"\"\n Same as Structure.sort(), also remaps nodes in graph.\n :param key:\n :param reverse:\n :return:\n \"\"\"\n\n old_structure = self.structure.copy()\n\n # sort Structure\n self.structure._sites = sorted(self.structure._sites, key=key, reverse=reverse)\n\n # apply Structure ordering to graph\n mapping = {idx:self.structure.index(site) for idx, site in enumerate(old_structure)}\n self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)\n\n # normalize directions of edges\n edges_to_remove = []\n edges_to_add = []\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if v < u:\n new_v, new_u, new_d = u, v, d.copy()\n new_d['to_jimage'] = tuple(np.multiply(-1, d['to_jimage']).astype(int))\n edges_to_remove.append((u, v, k))\n edges_to_add.append((new_u, new_v, new_d))\n\n # add/delete marked edges\n for edges_to_remove in edges_to_remove:\n self.graph.remove_edge(*edges_to_remove)\n for (u, v, d) in edges_to_add:\n self.graph.add_edge(u, v, **d)\n\n def __copy__(self):\n return StructureGraph.from_dict(self.as_dict())\n\n def __eq__(self, other):\n \"\"\"\n Two StructureGraphs are equal if they have equal Structures,\n and have the same edges between Sites. Edge weights can be\n different and StructureGraphs can still be considered equal.\n\n :param other: StructureGraph\n :return (bool):\n \"\"\"\n\n # sort for consistent node indices\n # PeriodicSite should have a proper __hash__() value,\n # using its frac_coords as a convenient key\n mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])\n\n edges = {(u, v, d['to_jimage'])\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v, d['to_jimage'])\n for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n return (edges == edges_other) and \\\n (self.structure == other_sorted.structure)\n\n def diff(self, other, strict=True):\n \"\"\"\n Compares two StructureGraphs. Returns dict with\n keys 'self', 'other', 'both' with edges that are\n present in only one StructureGraph ('self' and\n 'other'), and edges that are present in both.\n\n The Jaccard distance is a simple measure of the\n dissimilarity between two StructureGraphs (ignoring\n edge weights), and is defined by 1 - (size of the\n intersection / size of the union) of the sets of\n edges. 
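(Editor's worked example: two graphs sharing 3 distinct edges out of 5\n in their union give dist = 1 - 3/5 = 0.4.) 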
This is returned with key 'dist'.\n\n Important note: all node indices are in terms\n of the StructureGraph this method is called\n from, not the 'other' StructureGraph: there\n is no guarantee the node indices will be the\n same if the underlying Structures are ordered\n differently.\n\n :param other: StructureGraph\n :param strict: if False, will compare bonds\n from different Structures, with node indices\n replaced by Specie strings, will not count\n number of occurrences of bonds\n :return:\n \"\"\"\n\n if self.structure != other.structure and strict:\n raise ValueError(\"Meaningless to compare StructureGraphs if \"\n \"corresponding Structures are different.\")\n\n if strict:\n\n # sort for consistent node indices\n # PeriodicSite should have a proper __hash__() value,\n # using its frac_coords as a convenient key\n mapping = {tuple(site.frac_coords):self.structure.index(site) for site in other.structure}\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.frac_coords)])\n\n edges = {(u, v, d['to_jimage'])\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v, d['to_jimage'])\n for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n else:\n\n edges = {(str(self.structure[u].specie),\n str(self.structure[v].specie))\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(str(other.structure[u].specie),\n str(other.structure[v].specie))\n for u, v, d in other.graph.edges(keys=False, data=True)}\n\n if len(edges) == 0 and len(edges_other) == 0:\n jaccard_dist = 0 # by definition\n else:\n jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))\n\n return {\n 'self': edges - edges_other,\n 'other': edges_other - edges,\n 'both': edges.intersection(edges_other),\n 'dist': jaccard_dist\n }\n\n def get_subgraphs_as_molecules(self, use_weights=False):\n \"\"\"\n Retrieve subgraphs as molecules, useful for extracting\n molecules from periodic crystals.\n\n Will only return unique molecules, not any duplicates\n present in the crystal (a duplicate defined as an\n isomorphic subgraph).\n\n :param use_weights (bool): If True, only treat subgraphs\n as isomorphic if edges have the same weights. Typically,\n this means molecules will need to have the same bond\n lengths to be defined as duplicates, otherwise bond\n lengths can differ. This is a fairly robust approach,\n but will treat e.g. 
enantiomers as being duplicates.\n\n :return: list of unique Molecules in Structure\n \"\"\"\n\n # creating a supercell is an easy way to extract\n # molecules (and not, e.g., layers of a 2D crystal)\n # without adding extra logic\n if getattr(self, '_supercell_sg', None) is None:\n self._supercell_sg = supercell_sg = self*(3,3,3)\n\n # make undirected to find connected subgraphs\n supercell_sg.graph = nx.Graph(supercell_sg.graph)\n\n # find subgraphs\n all_subgraphs = list(nx.connected_component_subgraphs(supercell_sg.graph))\n\n # discount subgraphs that lie across *supercell* boundaries\n # these will be subgraphs representing crystals\n molecule_subgraphs = []\n for subgraph in all_subgraphs:\n intersects_boundary = any([d['to_jimage'] != (0, 0, 0)\n for u, v, d in subgraph.edges(data=True)])\n if not intersects_boundary:\n molecule_subgraphs.append(subgraph)\n\n # add specie names to graph to be able to test for isomorphism\n for subgraph in molecule_subgraphs:\n for n in subgraph:\n subgraph.add_node(n, specie=str(supercell_sg.structure[n].specie))\n\n # now define how we test for isomorphism\n def node_match(n1, n2):\n return n1['specie'] == n2['specie']\n def edge_match(e1, e2):\n if use_weights:\n return e1['weight'] == e2['weight']\n else:\n return True\n\n # prune duplicate subgraphs\n unique_subgraphs = []\n for subgraph in molecule_subgraphs:\n\n already_present = [nx.is_isomorphic(subgraph, g,\n node_match=node_match,\n edge_match=edge_match)\n for g in unique_subgraphs]\n\n if not any(already_present):\n unique_subgraphs.append(subgraph)\n\n # get Molecule objects for each subgraph\n molecules = []\n for subgraph in unique_subgraphs:\n\n coords = [supercell_sg.structure[n].coords for n\n in subgraph.nodes()]\n species = [supercell_sg.structure[n].specie for n\n in subgraph.nodes()]\n\n molecule = Molecule(species, coords)\n\n # shift so origin is at center of mass\n molecule = molecule.get_centered_molecule()\n\n molecules.append(molecule)\n\n return molecules\n\n\nclass MolGraphSplitError(Exception):\n # Raised when a molecule graph fails to split into two disconnected\n # subgraphs\n pass\n\n\nclass MoleculeGraph(MSONable):\n \"\"\"\n This is a class for annotating a Molecule with\n bond information, stored in the form of a graph. 
A \"bond\" does\n not necessarily have to be a chemical bond, but can store any\n kind of information that connects two Sites.\n \"\"\"\n\n def __init__(self, molecule, graph_data=None):\n \"\"\"\n If constructing this class manually, use the `with_empty_graph`\n method or `with_local_env_strategy` method (using an algorithm\n provided by the `local_env` module, such as O'Keeffe).\n\n This class that contains connection information:\n relationships between sites represented by a Graph structure,\n and an associated structure object.\n\n This class uses the NetworkX package to store and operate\n on the graph itself, but contains a lot of helper methods\n to make associating a graph with a given molecule easier.\n\n Use cases for this include storing bonding information,\n NMR J-couplings, Heisenberg exchange parameters, etc.\n\n :param molecule: Molecule object\n\n :param graph_data: dict containing graph information in\n dict format (not intended to be constructed manually,\n see as_dict method for format)\n \"\"\"\n\n if isinstance(molecule, MoleculeGraph):\n # just make a copy from input\n graph_data = molecule.as_dict()['graphs']\n\n self.molecule = molecule\n self.graph = nx.readwrite.json_graph.adjacency_graph(graph_data)\n\n # tidy up edge attr dicts, reading to/from json duplicates\n # information\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if 'id' in d:\n del d['id']\n if 'key' in d:\n del d['key']\n # ensure images are tuples (conversion to lists happens\n # when serializing back from json), it's important images\n # are hashable/immutable\n if 'to_jimage' in d:\n d['to_jimage'] = tuple(d['to_jimage'])\n if 'from_jimage' in d:\n d['from_jimage'] = tuple(d['from_jimage'])\n\n self.set_node_attributes()\n\n @classmethod\n def with_empty_graph(cls, molecule, name=\"bonds\",\n edge_weight_name=None,\n edge_weight_units=None):\n \"\"\"\n Constructor for MoleculeGraph, returns a MoleculeGraph\n object with an empty graph (no edges, only nodes defined\n that correspond to Sites in Molecule).\n\n :param molecule (Molecule):\n :param name (str): name of graph, e.g. \"bonds\"\n :param edge_weight_name (str): name of edge weights,\n e.g. \"bond_length\" or \"exchange_constant\"\n :param edge_weight_units (str): name of edge weight units\n e.g. \"Å\" or \"eV\"\n :return (MoleculeGraph):\n \"\"\"\n\n if edge_weight_name and (edge_weight_units is None):\n raise ValueError(\"Please specify units associated \"\n \"with your edge weights. Can be \"\n \"empty string if arbitrary or \"\n \"dimensionless.\")\n\n # construct graph with one node per site\n # graph attributes don't change behavior of graph,\n # they're just for book-keeping\n graph = nx.MultiDiGraph(edge_weight_name=edge_weight_name,\n edge_weight_units=edge_weight_units,\n name=name)\n graph.add_nodes_from(range(len(molecule)))\n\n graph_data = json_graph.adjacency_data(graph)\n\n return cls(molecule, graph_data=graph_data)\n\n @staticmethod\n def with_edges(molecule, edges):\n \"\"\"\n Constructor for MoleculeGraph, using pre-existing or pre-defined edges\n with optional edge parameters.\n\n :param molecule: Molecule object\n :param edges: dict representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. 
Props should be None if no\n additional properties are to be specified.\n :return: mg, a MoleculeGraph\n \"\"\"\n\n mg = MoleculeGraph.with_empty_graph(molecule, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n for edge, props in edges.items():\n\n try:\n from_index = edge[0]\n to_index = edge[1]\n except TypeError:\n raise ValueError(\"Edges must be given as (from_index, to_index) \"\n \"tuples\")\n\n if props is not None:\n if \"weight\" in props.keys():\n weight = props[\"weight\"]\n del props[\"weight\"]\n else:\n weight = None\n\n if len(props.items()) == 0:\n props = None\n else:\n weight = None\n\n nodes = mg.graph.nodes\n if not (from_index in nodes and to_index in nodes):\n raise ValueError(\"Edges cannot be added if nodes are not\"\n \" present in the graph. Please check your\"\n \" indices.\")\n\n mg.add_edge(from_index, to_index, weight=weight,\n edge_properties=props)\n\n mg.set_node_attributes()\n return mg\n\n @staticmethod\n def with_local_env_strategy(molecule, strategy, reorder=True,\n extend_structure=True):\n \"\"\"\n Constructor for MoleculeGraph, using a strategy\n from :Class: `pymatgen.analysis.local_env`.\n\n :param molecule: Molecule object\n :param strategy: an instance of a\n :Class: `pymatgen.analysis.local_env.NearNeighbors` object\n :param reorder: bool, representing if graph nodes need to be reordered\n following the application of the local_env strategy\n :param extend_structure: If True (default), then a large artificial box\n will be placed around the Molecule, because some strategies assume\n periodic boundary conditions.\n :return: mg, a MoleculeGraph\n \"\"\"\n\n mg = MoleculeGraph.with_empty_graph(molecule, name=\"bonds\",\n edge_weight_name=\"weight\",\n edge_weight_units=\"\")\n\n # NearNeighbor classes only (generally) work with structures\n # molecules have to be boxed first\n coords = molecule.cart_coords\n\n if extend_structure:\n a = max(coords[:, 0]) - min(coords[:, 0]) + 100\n b = max(coords[:, 1]) - min(coords[:, 1]) + 100\n c = max(coords[:, 2]) - min(coords[:, 2]) + 100\n\n molecule = molecule.get_boxed_structure(a, b, c, no_cross=True)\n\n for n in range(len(molecule)):\n neighbors = strategy.get_nn_info(molecule, n)\n for neighbor in neighbors:\n\n # all bonds in molecules should not cross\n # (artificial) periodic boundaries\n if not np.array_equal(neighbor['image'], [0, 0, 0]):\n continue\n\n # local_env will always try to add two edges\n # for any one bond, one from site u to site v\n # and another from site v to site u: this is\n # harmless, so warn_duplicates=False\n mg.add_edge(from_index=n,\n to_index=neighbor['site_index'],\n weight=neighbor['weight'],\n warn_duplicates=False)\n\n if reorder:\n # Reverse order of nodes to match with molecule\n n = len(mg.molecule)\n mapping = {i: (n-i) for i in range(n)}\n mapping = {i: (j-1) for i, j in mapping.items()}\n\n mg.graph = nx.relabel_nodes(mg.graph, mapping)\n\n duplicates = []\n for edge in mg.graph.edges:\n if edge[2] != 0:\n duplicates.append(edge)\n\n for duplicate in duplicates:\n mg.graph.remove_edge(duplicate[0], duplicate[1], key=duplicate[2])\n\n mg.set_node_attributes()\n return mg\n\n @property\n def name(self):\n \"\"\"\n :return: Name of graph\n \"\"\"\n return self.graph.graph['name']\n\n @property\n def edge_weight_name(self):\n \"\"\"\n :return: Name of the edge weight property of graph\n \"\"\"\n return self.graph.graph['edge_weight_name']\n\n @property\n def edge_weight_unit(self):\n \"\"\"\n :return: Units of the edge weight property of 
graph\n \"\"\"\n return self.graph.graph['edge_weight_units']\n\n def add_edge(self, from_index, to_index,\n weight=None, warn_duplicates=True,\n edge_properties=None):\n \"\"\"\n Add edge to graph.\n\n Since physically a 'bond' (or other connection\n between sites) doesn't have a direction, from_index,\n from_jimage can be swapped with to_index, to_jimage.\n\n However, images will always be shifted so that\n from_index < to_index and from_jimage becomes (0, 0, 0).\n\n :param from_index: index of site connecting from\n :param to_index: index of site connecting to\n :param weight (float): e.g. bond length\n :param warn_duplicates (bool): if True, will warn if\n trying to add duplicate edges (duplicate edges will not\n be added in either case)\n :param edge_properties (dict): any other information to\n store on graph edges, similar to Structure's site_properties\n :return:\n \"\"\"\n\n # this is not necessary for the class to work, but\n # just makes it neater\n if to_index < from_index:\n to_index, from_index = from_index, to_index\n\n # sanitize types\n from_index, to_index = int(from_index), int(to_index)\n\n # check we're not trying to add a duplicate edge\n # there should only ever be at most one edge\n # between two sites; skip duplicates silently unless asked to warn\n existing_edge_data = self.graph.get_edge_data(from_index, to_index)\n if existing_edge_data:\n if warn_duplicates:\n warnings.warn(\"Trying to add an edge that already exists from \"\n \"site {} to site {}.\".format(from_index,\n to_index))\n return\n\n # generic container for additional edge properties,\n # similar to site properties\n edge_properties = edge_properties or {}\n\n if weight:\n self.graph.add_edge(from_index, to_index,\n weight=weight,\n **edge_properties)\n else:\n self.graph.add_edge(from_index, to_index,\n **edge_properties)\n\n def insert_node(self, i, species, coords, validate_proximity=False,\n site_properties=None, edges=None):\n \"\"\"\n A wrapper around Molecule.insert(), which also incorporates the new\n site into the MoleculeGraph.\n\n :param i: Index at which to insert the new site\n :param species: Species for the new site\n :param coords: 3x1 array representing coordinates of the new site\n :param validate_proximity: For Molecule.insert(); if True (default\n False), distance will be checked to ensure that site can be safely\n added.\n :param site_properties: Site properties for Molecule\n :param edges: List of dicts representing edges to be added to the\n MoleculeGraph. These edges must include the index of the new site i,\n and all indices used for these edges should reflect the\n MoleculeGraph AFTER the insertion, NOT before. Each dict should at\n least have a \"to_index\" and \"from_index\" key, and can also have a\n \"weight\" and a \"properties\" key.\n :return:\n \"\"\"\n\n self.molecule.insert(i, species, coords,\n validate_proximity=validate_proximity,\n properties=site_properties)\n\n mapping = {}\n for j in range(len(self.molecule) - 1):\n if j < i:\n mapping[j] = j\n else:\n mapping[j] = j + 1\n nx.relabel_nodes(self.graph, mapping, copy=False)\n\n self.graph.add_node(i)\n self.set_node_attributes()\n\n if edges is not None:\n for edge in edges:\n try:\n self.add_edge(edge[\"from_index\"], edge[\"to_index\"],\n weight=edge.get(\"weight\", None),\n edge_properties=edge.get(\"properties\", None))\n except KeyError:\n raise RuntimeError(\"Some edges are invalid.\")\n\n def set_node_attributes(self):\n \"\"\"\n Replicates molecule site properties (specie, coords, etc.) 
in the\n MoleculeGraph.\n\n :return:\n \"\"\"\n\n species = {}\n coords = {}\n properties = {}\n for node in self.graph.nodes():\n species[node] = self.molecule[node].specie.symbol\n coords[node] = self.molecule[node].coords\n properties[node] = self.molecule[node].properties\n\n nx.set_node_attributes(self.graph, species, \"specie\")\n nx.set_node_attributes(self.graph, coords, \"coords\")\n nx.set_node_attributes(self.graph, properties, \"properties\")\n\n def alter_edge(self, from_index, to_index,\n new_weight=None, new_edge_properties=None):\n \"\"\"\n Alters either the weight or the edge_properties of\n an edge in the MoleculeGraph.\n\n :param from_index: int\n :param to_index: int\n :param new_weight: alter_edge does not require\n that weight be altered. As such, by default, this\n is None. If weight is to be changed, it should be a\n float.\n :param new_edge_properties: alter_edge does not require\n that edge_properties be altered. As such, by default,\n this is None. If any edge properties are to be changed,\n it should be a dictionary of edge properties to be changed.\n :return:\n \"\"\"\n\n existing_edge = self.graph.get_edge_data(from_index, to_index)\n\n # ensure that edge exists before attempting to change it\n if not existing_edge:\n raise ValueError(\"Edge between {} and {} cannot be altered;\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n # Third index should always be 0 because there should only be one edge between any two nodes\n if new_weight is not None:\n self.graph[from_index][to_index][0]['weight'] = new_weight\n\n if new_edge_properties is not None:\n for prop in list(new_edge_properties.keys()):\n self.graph[from_index][to_index][0][prop] = new_edge_properties[prop]\n\n def break_edge(self, from_index, to_index, allow_reverse=False):\n \"\"\"\n Remove an edge from the MoleculeGraph\n\n :param from_index: int\n :param to_index: int\n :param allow_reverse: If allow_reverse is True, then break_edge will\n attempt to break both (from_index, to_index) and, failing that,\n will attempt to break (to_index, from_index).\n :return:\n \"\"\"\n\n # ensure that edge exists before attempting to remove it\n existing_edge = self.graph.get_edge_data(from_index, to_index)\n existing_reverse = None\n\n if existing_edge:\n self.graph.remove_edge(from_index, to_index)\n\n else:\n if allow_reverse:\n existing_reverse = self.graph.get_edge_data(to_index,\n from_index)\n\n if existing_reverse:\n self.graph.remove_edge(to_index, from_index)\n else:\n raise ValueError(\"Edge cannot be broken between {} and {};\\\n no edge exists between those sites.\".format(\n from_index, to_index\n ))\n\n def remove_nodes(self, indices):\n \"\"\"\n A wrapper for Molecule.remove_sites().\n\n :param indices: list of indices in the current Molecule (and graph) to\n be removed.\n :return:\n \"\"\"\n\n self.molecule.remove_sites(indices)\n self.graph.remove_nodes_from(indices)\n\n mapping = {}\n for correct, current in enumerate(sorted(self.graph.nodes)):\n mapping[current] = correct\n\n nx.relabel_nodes(self.graph, mapping, copy=False)\n self.set_node_attributes()\n\n def split_molecule_subgraphs(self, bonds, allow_reverse=False,\n alterations=None):\n \"\"\"\n Split MoleculeGraph into two or more MoleculeGraphs by\n breaking a set of bonds. 
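For instance (editor's note), for an ethanol\n MoleculeGraph whose C-O bond joins indices 1 and 2,\n split_molecule_subgraphs([(1, 2)]) would return an ethyl fragment and a\n hydroxyl fragment. 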
This function uses\n MoleculeGraph.break_edge repeatedly to create\n disjoint graphs (two or more separate molecules).\n This function not only alters the graph\n information, but also changes the underlying\n Molecules.\n If the bonds parameter does not include sufficient\n bonds to separate two molecule fragments, then this\n function will fail.\n Currently, this function naively assigns the charge\n of the total molecule to a single submolecule. A\n later effort will be to actually accurately assign\n charge.\n NOTE: This function does not modify the original\n MoleculeGraph. It creates a copy, modifies that, and\n returns two or more new MoleculeGraph objects.\n\n :param bonds: list of tuples (from_index, to_index)\n representing bonds to be broken to split the MoleculeGraph.\n :param alterations: a dict {(from_index, to_index): alt},\n where alt is a dictionary including weight and/or edge\n properties to be changed following the split.\n :param allow_reverse: If allow_reverse is True, then break_edge will\n attempt to break both (from_index, to_index) and, failing that,\n will attempt to break (to_index, from_index).\n :return: list of MoleculeGraphs\n \"\"\"\n\n self.set_node_attributes()\n\n original = copy.deepcopy(self)\n\n for bond in bonds:\n original.break_edge(bond[0], bond[1], allow_reverse=allow_reverse)\n\n if nx.is_weakly_connected(original.graph):\n raise MolGraphSplitError(\"Cannot split molecule; \\\n MoleculeGraph is still connected.\")\n else:\n\n # alter any bonds before partition, to avoid remapping\n if alterations is not None:\n for (u, v) in alterations.keys():\n if \"weight\" in alterations[(u, v)]:\n weight = alterations[(u, v)][\"weight\"]\n del alterations[(u, v)][\"weight\"]\n edge_properties = alterations[(u, v)] \\\n if len(alterations[(u, v)]) != 0 else None\n original.alter_edge(u, v, new_weight=weight,\n new_edge_properties=edge_properties)\n else:\n original.alter_edge(u, v,\n new_edge_properties=alterations[(u, v)])\n\n sub_mols = []\n\n # Had to use nx.weakly_connected_components because of deprecation\n # of nx.weakly_connected_component_subgraphs\n components = nx.weakly_connected_components(original.graph)\n subgraphs = [original.graph.subgraph(c) for c in components]\n\n for subg in subgraphs:\n\n nodes = sorted(list(subg.nodes))\n\n # Molecule indices are essentially list-based, so node indices\n # must be remapped, incrementing from 0\n mapping = {}\n for i in range(len(nodes)):\n mapping[nodes[i]] = i\n\n # just give charge to whatever subgraph has node with index 0\n # TODO: actually figure out how to distribute charge\n if 0 in nodes:\n charge = self.molecule.charge\n else:\n charge = 0\n\n # relabel nodes in graph to match mapping\n new_graph = nx.relabel_nodes(subg, mapping)\n\n species = nx.get_node_attributes(new_graph, \"specie\")\n coords = nx.get_node_attributes(new_graph, \"coords\")\n raw_props = nx.get_node_attributes(new_graph, \"properties\")\n\n properties = {}\n for prop_set in raw_props.values():\n for prop in prop_set.keys():\n if prop in properties:\n properties[prop].append(prop_set[prop])\n else:\n properties[prop] = [prop_set[prop]]\n\n # Site properties must be present for all atoms in the molecule\n # in order to be used for Molecule instantiation\n # (iterate over a copy, since keys may be deleted mid-loop)\n for k, v in list(properties.items()):\n if len(v) != len(species):\n del properties[k]\n\n new_mol = Molecule(species, coords, charge=charge,\n site_properties=properties)\n graph_data = json_graph.adjacency_data(new_graph)\n\n # create new MoleculeGraph\n 
sub_mols.append(MoleculeGraph(new_mol, graph_data=graph_data))\n\n return sub_mols\n\n def build_unique_fragments(self):\n \"\"\"\n Find all possible fragment combinations of the MoleculeGraphs (in other\n words, all connected induced subgraphs)\n\n :return:\n \"\"\"\n self.set_node_attributes()\n\n graph = self.graph.to_undirected()\n\n nm = iso.categorical_node_match(\"specie\", \"ERROR\")\n\n # find all possible fragments, aka connected induced subgraphs\n all_fragments = []\n for ii in range(1, len(self.molecule)):\n for combination in combinations(graph.nodes, ii):\n subgraph = nx.subgraph(graph, combination)\n if nx.is_connected(subgraph):\n all_fragments.append(subgraph)\n\n # narrow to all unique fragments using graph isomorphism\n unique_fragments = []\n for fragment in all_fragments:\n if not [nx.is_isomorphic(fragment, f, node_match=nm)\n for f in unique_fragments].count(True) >= 1:\n unique_fragments.append(fragment)\n\n # convert back to molecule graphs\n unique_mol_graphs = []\n for fragment in unique_fragments:\n mapping = {e: i for i, e in enumerate(sorted(fragment.nodes))}\n remapped = nx.relabel_nodes(fragment, mapping)\n\n species = nx.get_node_attributes(remapped, \"specie\")\n coords = nx.get_node_attributes(remapped, \"coords\")\n\n edges = {}\n\n for from_index, to_index, key in remapped.edges:\n edge_props = fragment.get_edge_data(from_index, to_index, key=key)\n\n edges[(from_index, to_index)] = edge_props\n\n unique_mol_graphs.append(self.with_edges(Molecule(species=species,\n coords=coords,\n charge=self.molecule.charge),\n edges))\n return unique_mol_graphs\n\n def substitute_group(self, index, func_grp, strategy, bond_order=1,\n graph_dict=None, strategy_params=None, reorder=True,\n extend_structure=True):\n \"\"\"\n Builds off of Molecule.substitute to replace an atom in self.molecule\n with a functional group. This method also amends self.graph to\n incorporate the new functional group.\n\n NOTE: using a MoleculeGraph will generally produce a different graph\n compared with using a Molecule or str (when not using graph_dict).\n This is because of the reordering that occurs when using some of the\n local_env strategies.\n\n :param index: Index of atom to substitute.\n :param func_grp: Substituent molecule. There are three options:\n\n 1. Providing an actual molecule as the input. The first atom\n must be a DummySpecie X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n 3. A MoleculeGraph object.\n :param strategy: Class from pymatgen.analysis.local_env.\n :param bond_order: A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. Defaults to 1.\n :param graph_dict: Dictionary representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. 
If None, then the algorithm\n will attempt to automatically determine bonds using one of\n a list of strategies defined in pymatgen.analysis.local_env.\n :param strategy_params: dictionary of keyword arguments for strategy.\n If None, default parameters will be used.\n :param reorder: bool, representing if graph nodes need to be reordered\n following the application of the local_env strategy\n :param extend_structure: If True (default), then a large artificial box\n will be placed around the Molecule, because some strategies assume\n periodic boundary conditions.\n :return:\n \"\"\"\n\n def map_indices(grp):\n grp_map = {}\n\n # Get indices now occupied by functional group\n # Subtracting 1 because the dummy atom X should not count\n atoms = len(grp) - 1\n offset = len(self.molecule) - atoms\n\n for i in range(atoms):\n grp_map[i] = i + offset\n\n return grp_map\n\n # Work is simplified if a graph is already in place\n if isinstance(func_grp, MoleculeGraph):\n\n self.molecule.substitute(index, func_grp.molecule,\n bond_order=bond_order)\n\n mapping = map_indices(func_grp.molecule)\n\n for (u, v) in list(func_grp.graph.edges()):\n edge_props = func_grp.graph.get_edge_data(u, v)[0]\n weight = None\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n self.add_edge(mapping[u], mapping[v],\n weight=weight, edge_properties=edge_props)\n\n else:\n if isinstance(func_grp, Molecule):\n func_grp = copy.deepcopy(func_grp)\n else:\n try:\n func_grp = copy.deepcopy(FunctionalGroups[func_grp])\n except:\n raise RuntimeError(\"Can't find functional group in list. \"\n \"Provide explicit coordinate instead\")\n\n self.molecule.substitute(index, func_grp, bond_order=bond_order)\n\n mapping = map_indices(func_grp)\n\n # Remove dummy atom \"X\"\n func_grp.remove_species(\"X\")\n\n if graph_dict is not None:\n for (u, v) in graph_dict.keys():\n edge_props = graph_dict[(u, v)]\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n self.add_edge(mapping[u], mapping[v],\n weight=weight, edge_properties=edge_props)\n\n else:\n if strategy_params is None:\n strategy_params = {}\n strat = strategy(**strategy_params)\n graph = self.with_local_env_strategy(func_grp, strat, reorder=reorder,\n extend_structure=extend_structure)\n\n for (u, v) in list(graph.graph.edges()):\n edge_props = graph.graph.get_edge_data(u, v)[0]\n weight = None\n if \"weight\" in edge_props.keys():\n weight = edge_props[\"weight\"]\n del edge_props[\"weight\"]\n\n if 0 not in list(graph.graph.nodes()):\n # If graph indices have different indexing\n u, v = (u-1), (v-1)\n\n self.add_edge(mapping[u], mapping[v],\n weight=weight, edge_properties=edge_props)\n\n def replace_group(self, index, func_grp, strategy, bond_order=1,\n graph_dict=None, strategy_params=None, reorder=True,\n extend_structure=True):\n \"\"\"\n Builds off of Molecule.substitute and MoleculeGraph.substitute_group\n to replace a functional group in self.molecule with a functional group.\n This method also amends self.graph to incorporate the new functional\n group.\n\n TODO: Figure out how to replace into a ring structure.\n\n :param index: Index of atom to substitute.\n :param func_grp: Substituent molecule. There are three options:\n\n 1. Providing an actual molecule as the input. The first atom\n must be a DummySpecie X, indicating the position of\n nearest neighbor. The second atom must be the next\n nearest atom. 
For example, for a methyl group\n substitution, func_grp should be X-CH3, where X is the\n first site and C is the second site. What the code will\n do is to remove the index site, and connect the nearest\n neighbor to the C atom in CH3. The X-C bond indicates the\n directionality to connect the atoms.\n 2. A string name. The molecule will be obtained from the\n relevant template in func_groups.json.\n 3. A MoleculeGraph object.\n :param strategy: Class from pymatgen.analysis.local_env.\n :param bond_order: A specified bond order to calculate the bond\n length between the attached functional group and the nearest\n neighbor site. Defaults to 1.\n :param graph_dict: Dictionary representing the bonds of the functional\n group (format: {(u, v): props}, where props is a dictionary of\n properties, including weight. If None, then the algorithm\n will attempt to automatically determine bonds using one of\n a list of strategies defined in pymatgen.analysis.local_env.\n :param strategy_params: dictionary of keyword arguments for strategy.\n If None, default parameters will be used.\n :param reorder: bool, representing if graph nodes need to be reordered\n following the application of the local_env strategy\n :param extend_structure: If True (default), then a large artificial box\n will be placed around the Molecule, because some strategies assume\n periodic boundary conditions.\n :return:\n \"\"\"\n\n self.set_node_attributes()\n neighbors = self.get_connected_sites(index)\n\n # If the atom at index is terminal\n if len(neighbors) == 1:\n self.substitute_group(index, func_grp, strategy,\n bond_order=bond_order, graph_dict=graph_dict,\n strategy_params=strategy_params,\n reorder=reorder,\n extend_structure=extend_structure)\n\n else:\n rings = self.find_rings(including=[index])\n if len(rings) != 0:\n raise RuntimeError(\"Currently functional group replacement \"\n \"cannot occur at an atom within a ring \"\n \"structure.\")\n\n to_remove = set()\n sizes = dict()\n disconnected = self.graph.to_undirected()\n disconnected.remove_node(index)\n for neighbor in neighbors:\n sizes[neighbor[2]] = len(nx.descendants(disconnected, neighbor[2]))\n\n keep = max(sizes, key=lambda x: sizes[x])\n for i in sizes.keys():\n if i != keep:\n to_remove.add(i)\n\n self.remove_nodes(list(to_remove))\n\n self.substitute_group(index, func_grp, strategy,\n bond_order=bond_order, graph_dict=graph_dict,\n strategy_params=strategy_params,\n reorder=reorder,\n extend_structure=extend_structure)\n\n def find_rings(self, including=None):\n \"\"\"\n Find ring structures in the MoleculeGraph.\n\n :param including: list of site indices. If\n including is not None, then find_rings will\n only return those rings including the specified\n sites. By default, this parameter is None, and\n all rings will be returned.\n :return: dict {index:cycle}. Each\n entry will be a ring (cycle, in graph theory terms) including the index\n found in the Molecule. 
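(Editor's note: for benzene, each ring would come back as six bonded\n (u, v) pairs, e.g. [(5, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5)].) 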
If there is no cycle including an index, the\n value will be an empty list.\n \"\"\"\n\n # Copies self.graph such that all edges (u, v) matched by edges (v, u)\n undirected = self.graph.to_undirected()\n directed = undirected.to_directed()\n\n cycles_nodes = []\n cycles_edges = []\n\n # Remove all two-edge cycles\n all_cycles = [c for c in nx.simple_cycles(directed) if len(c) > 2]\n\n # Using to_directed() will mean that each cycle always appears twice\n # So, we must also remove duplicates\n unique_sorted = []\n unique_cycles = []\n for cycle in all_cycles:\n if sorted(cycle) not in unique_sorted:\n unique_sorted.append(sorted(cycle))\n unique_cycles.append(cycle)\n\n if including is None:\n cycles_nodes = unique_cycles\n else:\n for i in including:\n for cycle in unique_cycles:\n if i in cycle and cycle not in cycles_nodes:\n cycles_nodes.append(cycle)\n\n for cycle in cycles_nodes:\n edges = []\n for i, e in enumerate(cycle):\n edges.append((cycle[i-1], e))\n cycles_edges.append(edges)\n\n return cycles_edges\n\n def get_connected_sites(self, n):\n \"\"\"\n Returns a named tuple of neighbors of site n:\n periodic_site, jimage, index, weight.\n Index is the index of the corresponding site\n in the original structure, weight can be\n None if not defined.\n :param n: index of Site in Molecule\n :param jimage: lattice vector of site\n :return: list of ConnectedSite tuples,\n sorted by closest first\n \"\"\"\n\n connected_sites = set()\n\n out_edges = [(u, v, d) for u, v, d in self.graph.out_edges(n, data=True)]\n in_edges = [(u, v, d) for u, v, d in self.graph.in_edges(n, data=True)]\n\n for u, v, d in out_edges + in_edges:\n\n weight = d.get('weight', None)\n\n if v == n:\n site = self.molecule[u]\n dist = self.molecule[v].distance(self.molecule[u])\n\n connected_site = ConnectedSite(site=site,\n jimage=(0, 0, 0),\n index=u,\n weight=weight,\n dist=dist)\n else:\n site = self.molecule[v]\n dist = self.molecule[u].distance(self.molecule[v])\n\n connected_site = ConnectedSite(site=site,\n jimage=(0, 0, 0),\n index=v,\n weight=weight,\n dist=dist)\n\n connected_sites.add(connected_site)\n\n # return list sorted by closest sites first\n connected_sites = list(connected_sites)\n connected_sites.sort(key=lambda x: x.dist)\n\n return connected_sites\n\n def get_coordination_of_site(self, n):\n \"\"\"\n Returns the number of neighbors of site n.\n In graph terms, simply returns degree\n of node corresponding to site n.\n :param n: index of site\n :return (int):\n \"\"\"\n number_of_self_loops = sum([1 for n, v in self.graph.edges(n) if n == v])\n return self.graph.degree(n) - number_of_self_loops\n\n def draw_graph_to_file(self, filename=\"graph\",\n diff=None,\n hide_unconnected_nodes=False,\n hide_image_edges=True,\n edge_colors=False,\n node_labels=False,\n weight_labels=False,\n image_labels=False,\n color_scheme=\"VESTA\",\n keep_dot=False,\n algo=\"fdp\"):\n \"\"\"\n Draws graph using GraphViz.\n\n The networkx graph object itself can also be drawn\n with networkx's in-built graph drawing methods, but\n note that this might give misleading results for\n multigraphs (edges are super-imposed on each other).\n\n If visualization is difficult to interpret,\n `hide_image_edges` can help, especially in larger\n graphs.\n\n :param filename: filename to output, will detect filetype\n from extension (any graphviz filetype supported, such as\n pdf or png)\n :param diff (StructureGraph): an additional graph to\n compare with, will color edges red that do not exist in diff\n and edges green that are in 
diff graph but not in the\n reference graph\n :param hide_unconnected_nodes: if True, hide unconnected\n nodes\n :param hide_image_edges: if True, do not draw edges that\n go through periodic boundaries\n :param edge_colors (bool): if True, use node colors to\n color edges\n :param node_labels (bool): if True, label nodes with\n species and site index\n :param weight_labels (bool): if True, label edges with\n weights\n :param image_labels (bool): if True, label edges with\n their periodic images (usually only used for debugging,\n edges to periodic images always appear as dashed lines)\n :param color_scheme (str): \"VESTA\" or \"JMOL\"\n :param keep_dot (bool): keep GraphViz .dot file for later\n visualization\n :param algo: any graphviz algo, \"neato\" (for simple graphs)\n or \"fdp\" (for more crowded graphs) usually give good outputs\n :return:\n \"\"\"\n\n if not which(algo):\n raise RuntimeError(\"StructureGraph graph drawing requires \"\n \"GraphViz binaries to be in the path.\")\n\n # Developer note: NetworkX also has methods for drawing\n # graphs using matplotlib, these also work here. However,\n # a dedicated tool like GraphViz allows for much easier\n # control over graph appearance and also correctly displays\n # multi-graphs (matplotlib can superimpose multiple edges).\n\n g = self.graph.copy()\n\n g.graph = {'nodesep': 10.0, 'dpi': 300, 'overlap': \"false\"}\n\n # add display options for nodes\n for n in g.nodes():\n\n # get label by species name\n label = \"{}({})\".format(str(self.molecule[n].specie), n) if node_labels else \"\"\n\n # use standard color scheme for nodes\n c = EL_COLORS[color_scheme].get(str(self.molecule[n].specie.symbol), [0, 0, 0])\n\n # get contrasting font color\n # magic numbers account for perceived luminescence\n # https://stackoverflow.com/questions/1855884/determine-font-color-based-on-background-color\n fontcolor = '#000000' if 1 - (c[0] * 0.299 + c[1] * 0.587\n + c[2] * 0.114) / 255 < 0.5 else '#ffffff'\n\n # convert color to hex string\n color = \"#{:02x}{:02x}{:02x}\".format(c[0], c[1], c[2])\n\n g.add_node(n, fillcolor=color, fontcolor=fontcolor, label=label,\n fontname=\"Helvetica-bold\", style=\"filled\", shape=\"circle\")\n\n edges_to_delete = []\n\n # add display options for edges\n for u, v, k, d in g.edges(keys=True, data=True):\n\n # retrieve from/to images, set as origin if not defined\n if \"to_jimage\" in d:\n to_image = d['to_jimage']\n else:\n to_image = (0, 0, 0)\n\n # set edge style\n d['style'] = \"solid\"\n if to_image != (0, 0, 0):\n d['style'] = \"dashed\"\n if hide_image_edges:\n edges_to_delete.append((u, v, k))\n\n # don't show edge directions\n d['arrowhead'] = \"none\"\n\n # only add labels for images that are not the origin\n if image_labels:\n d['headlabel'] = \"\" if to_image == (0, 0, 0) else \"to {}\".format((to_image))\n d['arrowhead'] = \"normal\" if d['headlabel'] else \"none\"\n\n # optionally color edges using node colors\n color_u = g.node[u]['fillcolor']\n color_v = g.node[v]['fillcolor']\n d['color_uv'] = \"{};0.5:{};0.5\".format(color_u, color_v) if edge_colors else \"#000000\"\n\n # optionally add weights to graph\n if weight_labels:\n units = g.graph.get('edge_weight_units', \"\")\n if d.get('weight'):\n d['label'] = \"{:.2f} {}\".format(d['weight'], units)\n\n # update edge with our new style attributes\n g.edges[u, v, k].update(d)\n\n # optionally remove periodic image edges,\n # these can be confusing due to periodic boundaries\n if hide_image_edges:\n for edge_to_delete in edges_to_delete:\n 
g.remove_edge(*edge_to_delete)\n\n # optionally hide unconnected nodes,\n # these can appear when removing periodic edges\n if hide_unconnected_nodes:\n g = g.subgraph([n for n in g.degree() if g.degree()[n] != 0])\n\n # optionally highlight differences with another graph\n if diff:\n diff = self.diff(diff, strict=True)\n green_edges = []\n red_edges = []\n for u, v, k, d in g.edges(keys=True, data=True):\n if (u, v, d['to_jimage']) in diff['self']:\n # edge has been deleted\n red_edges.append((u, v, k))\n elif (u, v, d['to_jimage']) in diff['other']:\n # edge has been added\n green_edges.append((u, v, k))\n for u, v, k in green_edges:\n g.edges[u, v, k].update({'color_uv': '#00ff00'})\n for u, v, k in red_edges:\n g.edges[u, v, k].update({'color_uv': '#ff0000'})\n\n basename, extension = os.path.splitext(filename)\n extension = extension[1:]\n\n write_dot(g, basename+\".dot\")\n\n with open(filename, \"w\") as f:\n\n args = [algo, \"-T\", extension, basename+\".dot\"]\n rs = subprocess.Popen(args,\n stdout=f,\n stdin=subprocess.PIPE, close_fds=True)\n rs.communicate()\n if rs.returncode != 0:\n raise RuntimeError(\"{} exited with return code {}.\".format(algo, rs.returncode))\n\n if not keep_dot:\n os.remove(basename+\".dot\")\n\n def as_dict(self):\n \"\"\"\n As in :Class: `pymatgen.core.Molecule` except\n with using `to_dict_of_dicts` from NetworkX\n to store graph information.\n \"\"\"\n\n d = {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"molecule\": self.molecule.as_dict(),\n \"graphs\": json_graph.adjacency_data(self.graph)}\n\n return d\n\n @classmethod\n def from_dict(cls, d):\n \"\"\"\n As in :Class: `pymatgen.core.Molecule` except\n restoring graphs using `from_dict_of_dicts`\n from NetworkX to restore graph information.\n \"\"\"\n m = Molecule.from_dict(d['molecule'])\n return cls(m, d['graphs'])\n\n def _edges_to_string(self, g):\n\n header = \"from to to_image \"\n header_line = \"---- ---- ------------\"\n edge_weight_name = g.graph[\"edge_weight_name\"]\n if edge_weight_name:\n print_weights = [\"weight\"]\n edge_label = g.graph[\"edge_weight_name\"]\n edge_weight_units = g.graph[\"edge_weight_units\"]\n if edge_weight_units:\n edge_label += \" ({})\".format(edge_weight_units)\n header += \" {}\".format(edge_label)\n header_line += \" {}\".format(\"-\"*max([18, len(edge_label)]))\n else:\n print_weights = False\n\n s = header + \"\\n\" + header_line + \"\\n\"\n\n edges = list(g.edges(data=True))\n\n # sort edges for consistent ordering\n edges.sort(key=itemgetter(0, 1))\n\n if print_weights:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12} {:.3e}\\n\".format(u, v, str(data.get(\"to_jimage\", (0, 0, 0))),\n data.get(\"weight\", 0))\n else:\n for u, v, data in edges:\n s += \"{:4} {:4} {:12}\\n\".format(u, v,\n str(data.get(\"to_jimage\", (0, 0, 0))))\n\n return s\n\n def __str__(self):\n s = \"Molecule Graph\"\n s += \"\\nMolecule: \\n{}\".format(self.molecule.__str__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __repr__(self):\n s = \"Molecule Graph\"\n s += \"\\nMolecule: \\n{}\".format(self.molecule.__repr__())\n s += \"\\nGraph: {}\\n\".format(self.name)\n s += self._edges_to_string(self.graph)\n return s\n\n def __len__(self):\n \"\"\"\n :return: length of Molecule / number of nodes in graph\n \"\"\"\n return len(self.molecule)\n\n def sort(self, key=None, reverse=False):\n \"\"\"\n Same as Molecule.sort(), also remaps nodes in graph.\n :param key:\n :param reverse:\n 
:return:\n \"\"\"\n\n old_molecule = self.molecule.copy()\n\n # sort Molecule\n self.molecule._sites = sorted(self.molecule._sites, key=key, reverse=reverse)\n\n # apply Molecule ordering to graph\n mapping = {idx: self.molecule.index(site) for idx, site in enumerate(old_molecule)}\n self.graph = nx.relabel_nodes(self.graph, mapping, copy=True)\n\n # normalize directions of edges\n edges_to_remove = []\n edges_to_add = []\n for u, v, k, d in self.graph.edges(keys=True, data=True):\n if v < u:\n new_v, new_u, new_d = u, v, d.copy()\n new_d['to_jimage'] = (0, 0, 0)\n edges_to_remove.append((u, v, k))\n edges_to_add.append((new_u, new_v, new_d))\n\n # add/delete marked edges\n for edges_to_remove in edges_to_remove:\n self.graph.remove_edge(*edges_to_remove)\n for (u, v, d) in edges_to_add:\n self.graph.add_edge(u, v, **d)\n\n def __copy__(self):\n return MoleculeGraph.from_dict(self.as_dict())\n\n def __eq__(self, other):\n \"\"\"\n Two MoleculeGraphs are equal if they have equal Molecules,\n and have the same edges between Sites. Edge weights can be\n different and MoleculeGraphs can still be considered equal.\n\n :param other: MoleculeGraph\n :return (bool):\n \"\"\"\n\n # sort for consistent node indices\n # PeriodicSite should have a proper __hash__() value,\n # using its frac_coords as a convenient key\n try:\n mapping = {tuple(site.coords):self.molecule.index(site) for site in other.molecule}\n except ValueError:\n return False\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])\n\n edges = {(u, v)\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v) for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n return (edges == edges_other) and \\\n (self.molecule == other_sorted.molecule)\n\n def isomorphic_to(self, other):\n \"\"\"\n Checks if the graphs of two MoleculeGraphs are isomorphic to one\n another. In order to prevent problems with misdirected edges, both\n graphs are converted into undirected nx.Graph objects.\n\n :param other: MoleculeGraph object to be compared.\n :return: bool\n \"\"\"\n if self.molecule.composition != other.molecule.composition:\n return False\n else:\n self_undir = self.graph.to_undirected()\n other_undir = other.graph.to_undirected()\n nm = iso.categorical_node_match(\"specie\", \"ERROR\")\n isomorphic = nx.is_isomorphic(self_undir, other_undir, node_match=nm)\n return isomorphic\n\n def diff(self, other, strict=True):\n \"\"\"\n Compares two MoleculeGraphs. Returns dict with\n keys 'self', 'other', 'both' with edges that are\n present in only one MoleculeGraph ('self' and\n 'other'), and edges that are present in both.\n\n The Jaccard distance is a simple measure of the\n dissimilarity between two MoleculeGraphs (ignoring\n edge weights), and is defined by 1 - (size of the\n intersection / size of the union) of the sets of\n edges. 
This is returned with key 'dist'.\n\n Important note: all node indices are in terms\n of the MoleculeGraph this method is called\n from, not the 'other' MoleculeGraph: there\n is no guarantee the node indices will be the\n same if the underlying Molecules are ordered\n differently.\n\n :param other: MoleculeGraph\n :param strict: if False, will compare bonds\n from different Molecules, with node indices\n replaced by Specie strings, will not count\n number of occurrences of bonds\n :return:\n \"\"\"\n\n if self.molecule != other.molecule and strict:\n raise ValueError(\"Meaningless to compare MoleculeGraphs if \"\n \"corresponding Molecules are different.\")\n\n if strict:\n # sort for consistent node indices\n # Site should have a proper __hash__() value,\n # using its coords as a convenient key\n mapping = {tuple(site.coords):self.molecule.index(site) for site in other.molecule}\n other_sorted = other.__copy__()\n other_sorted.sort(key=lambda site: mapping[tuple(site.coords)])\n\n edges = {(u, v, d.get('to_jimage', (0, 0, 0)))\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(u, v, d.get('to_jimage', (0, 0, 0)))\n for u, v, d in other_sorted.graph.edges(keys=False, data=True)}\n\n else:\n\n edges = {(str(self.molecule[u].specie),\n str(self.molecule[v].specie))\n for u, v, d in self.graph.edges(keys=False, data=True)}\n\n edges_other = {(str(other.molecule[u].specie),\n str(other.molecule[v].specie))\n for u, v, d in other.graph.edges(keys=False, data=True)}\n\n if len(edges) == 0 and len(edges_other) == 0:\n jaccard_dist = 0 # by definition\n else:\n jaccard_dist = 1 - len(edges.intersection(edges_other)) / len(edges.union(edges_other))\n\n return {\n 'self': edges - edges_other,\n 'other': edges_other - edges,\n 'both': edges.intersection(edges_other),\n 'dist': jaccard_dist\n }\n",
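"# --- Editor's usage sketch (not part of pymatgen) --------------------------\n# A minimal, hedged example of the StructureGraph / MoleculeGraph APIs defined\n# above. Assumes pymatgen and networkx are installed; MinimumDistanceNN is one\n# real pymatgen.analysis.local_env strategy, and the NaCl lattice parameter is\n# illustrative only.\nfrom pymatgen import Lattice, Molecule, Structure\nfrom pymatgen.analysis.graphs import MoleculeGraph, StructureGraph\nfrom pymatgen.analysis.local_env import MinimumDistanceNN\n\n# bond a rock-salt structure using a local_env strategy\nnacl = Structure.from_spacegroup(\"Fm-3m\", Lattice.cubic(5.69), [\"Na\", \"Cl\"],\n [[0, 0, 0], [0.5, 0.5, 0.5]])\nsg = StructureGraph.with_local_env_strategy(nacl, MinimumDistanceNN())\nprint(sg.types_of_coordination_environments()) # expect ['Cl-Na(6)', 'Na-Cl(6)']\n\n# supercell multiplication (__mul__) re-joins edges across the old boundaries\nsg2 = sg * (2, 2, 2)\nassert len(sg2) == 8 * len(sg)\n\n# diff: identical graphs should have Jaccard distance 0\nassert sg.diff(sg, strict=True)['dist'] == 0\n\n# MoleculeGraph: explicit bonds for water, then simple queries\nwater = Molecule([\"O\", \"H\", \"H\"],\n [[0.00, 0.00, 0.00], [0.76, 0.59, 0.00], [-0.76, 0.59, 0.00]])\nmg = MoleculeGraph.with_edges(water, {(0, 1): None, (0, 2): None})\nassert mg.get_coordination_of_site(0) == 2\nassert mg.isomorphic_to(mg)\n",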
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport os\nimport re\nimport itertools\nimport warnings\nimport logging\nimport math\nimport glob\nimport subprocess\n\nimport numpy as np\nfrom pymatgen.util.typing import PathLike\nfrom numpy.linalg import det\nfrom collections import OrderedDict, namedtuple\nfrom hashlib import md5\n\nfrom monty.io import zopen\nfrom monty.os.path import zpath\nfrom monty.json import MontyDecoder\nfrom monty.os import cd\n\nfrom enum import Enum\nfrom tabulate import tabulate\n\nimport scipy.constants as const\n\nfrom pymatgen import SETTINGS\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.structure import Structure\nfrom pymatgen.core.periodic_table import Element, get_el_sp\nfrom pymatgen.electronic_structure.core import Magmom\nfrom monty.design_patterns import cached_class\nfrom pymatgen.util.string import str_delimited\nfrom pymatgen.util.io_utils import clean_lines\nfrom monty.json import MSONable\n\n\"\"\"\nClasses for reading/manipulating/writing VASP input files. All major VASP input\nfiles.\n\"\"\"\n\n__author__ = \"Shyue Ping Ong, Geoffroy Hautier, Rickard Armiento, \" + \\\n \"Vincent L Chevrier, Stephen Dacek\"\n__copyright__ = \"Copyright 2011, The Materials Project\"\n__version__ = \"1.1\"\n__maintainer__ = \"Shyue Ping Ong\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__date__ = \"Jul 16, 2012\"\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass Poscar(MSONable):\n \"\"\"\n Object for representing the data in a POSCAR or CONTCAR file.\n Please note that this current implementation. Most attributes can be set\n directly.\n\n Args:\n structure (Structure): Structure object.\n comment (str): Optional comment line for POSCAR. Defaults to unit\n cell formula of structure. Defaults to None.\n selective_dynamics (Nx3 array): bool values for selective dynamics,\n where N is number of sites. Defaults to None.\n true_names (bool): Set to False is the names in the POSCAR are not\n well-defined and ambiguous. This situation arises commonly in\n vasp < 5 where the POSCAR sometimes does not contain element\n symbols. Defaults to True.\n velocities (Nx3 array): Velocities for the POSCAR. Typically parsed\n in MD runs or can be used to initialize velocities.\n predictor_corrector (Nx3 array): Predictor corrector for the POSCAR.\n Typically parsed in MD runs.\n\n .. attribute:: structure\n\n Associated Structure.\n\n .. attribute:: comment\n\n Optional comment string.\n\n .. attribute:: true_names\n\n Boolean indication whether Poscar contains actual real names parsed\n from either a POTCAR or the POSCAR itself.\n\n .. attribute:: selective_dynamics\n\n Selective dynamics attribute for each site if available. A Nx3 array of\n booleans.\n\n .. attribute:: velocities\n\n Velocities for each site (typically read in from a CONTCAR). A Nx3\n array of floats.\n\n .. attribute:: predictor_corrector\n\n Predictor corrector coordinates and derivatives for each site; i.e.\n a list of three 1x3 arrays for each site (typically read in from a MD \n CONTCAR).\n\n .. attribute:: predictor_corrector_preamble\n\n Predictor corrector preamble contains the predictor-corrector key,\n POTIM, and thermostat parameters that precede the site-specic predictor \n corrector data in MD CONTCAR\n\n .. attribute:: temperature\n\n Temperature of velocity Maxwell-Boltzmann initialization. 
Initialized\n to -1 (MB hasn\"t been performed).\n \"\"\"\n\n def __init__(self, structure, comment=None, selective_dynamics=None,\n true_names=True, velocities=None, predictor_corrector=None,\n predictor_corrector_preamble=None):\n if structure.is_ordered:\n site_properties = {}\n if selective_dynamics:\n site_properties[\"selective_dynamics\"] = selective_dynamics\n if velocities:\n site_properties[\"velocities\"] = velocities\n if predictor_corrector:\n site_properties[\"predictor_corrector\"] = predictor_corrector\n structure = Structure.from_sites(structure)\n self.structure = structure.copy(site_properties=site_properties)\n self.true_names = true_names\n self.comment = structure.formula if comment is None else comment\n self.predictor_corrector_preamble = predictor_corrector_preamble\n else:\n raise ValueError(\"Structure with partial occupancies cannot be \"\n \"converted into POSCAR!\")\n\n self.temperature = -1\n\n @property\n def velocities(self):\n return self.structure.site_properties.get(\"velocities\")\n\n @property\n def selective_dynamics(self):\n return self.structure.site_properties.get(\"selective_dynamics\")\n\n @property\n def predictor_corrector(self):\n return self.structure.site_properties.get(\"predictor_corrector\")\n\n @velocities.setter\n def velocities(self, velocities):\n self.structure.add_site_property(\"velocities\", velocities)\n\n @selective_dynamics.setter\n def selective_dynamics(self, selective_dynamics):\n self.structure.add_site_property(\"selective_dynamics\",\n selective_dynamics)\n\n @predictor_corrector.setter\n def predictor_corrector(self, predictor_corrector):\n self.structure.add_site_property(\"predictor_corrector\",\n predictor_corrector)\n\n @property\n def site_symbols(self):\n \"\"\"\n Sequence of symbols associated with the Poscar. Similar to 6th line in\n vasp 5+ POSCAR.\n \"\"\"\n syms = [site.specie.symbol for site in self.structure]\n return [a[0] for a in itertools.groupby(syms)]\n\n @property\n def natoms(self):\n \"\"\"\n Sequence of number of sites of each type associated with the Poscar.\n Similar to 7th line in vasp 5+ POSCAR or the 6th line in vasp 4 POSCAR.\n \"\"\"\n syms = [site.specie.symbol for site in self.structure]\n return [len(tuple(a[1])) for a in itertools.groupby(syms)]\n\n def __setattr__(self, name, value):\n if name in (\"selective_dynamics\", \"velocities\"):\n if value is not None and len(value) > 0:\n value = np.array(value)\n dim = value.shape\n if dim[1] != 3 or dim[0] != len(self.structure):\n raise ValueError(name + \" array must be same length as\" +\n \" the structure.\")\n value = value.tolist()\n super(Poscar, self).__setattr__(name, value)\n\n @staticmethod\n def from_file(filename, check_for_POTCAR=True, read_velocities=True):\n \"\"\"\n Reads a Poscar from a file.\n\n The code will try its best to determine the elements in the POSCAR in\n the following order:\n 1. If check_for_POTCAR is True, the code will try to check if a POTCAR\n is in the same directory as the POSCAR and use elements from that by\n default. (This is the VASP default sequence of priority).\n 2. If the input file is Vasp5-like and contains element symbols in the\n 6th line, the code will use that if check_for_POTCAR is False or there\n is no POTCAR found.\n 3. Failing (2), the code will check if a symbol is provided at the end\n of each coordinate.\n\n If all else fails, the code will just assign the first n elements in\n increasing atomic number, where n is the number of species, to the\n Poscar. For example, H, He, Li, .... 
This will ensure at least a\n unique element is assigned to each site and any analysis that does not\n require specific elemental properties should work fine.\n\n Args:\n filename (str): File name containing Poscar data.\n check_for_POTCAR (bool): Whether to check if a POTCAR is present\n in the same directory as the POSCAR. Defaults to True.\n read_velocities (bool): Whether to read or not velocities if they\n are present in the POSCAR. Default is True.\n\n Returns:\n Poscar object.\n \"\"\"\n dirname = os.path.dirname(os.path.abspath(filename))\n names = None\n if check_for_POTCAR:\n potcars = glob.glob(os.path.join(dirname, \"*POTCAR*\"))\n if potcars:\n try:\n potcar = Potcar.from_file(sorted(potcars)[0])\n names = [sym.split(\"_\")[0] for sym in potcar.symbols]\n [get_el_sp(n) for n in names] # ensure valid names\n except:\n names = None\n with zopen(filename, \"rt\") as f:\n return Poscar.from_string(f.read(), names,\n read_velocities=read_velocities)\n\n @staticmethod\n def from_string(data, default_names=None, read_velocities=True):\n \"\"\"\n Reads a Poscar from a string.\n\n The code will try its best to determine the elements in the POSCAR in\n the following order:\n 1. If default_names are supplied and valid, it will use those. Usually,\n default names comes from an external source, such as a POTCAR in the\n same directory.\n 2. If there are no valid default names but the input file is Vasp5-like\n and contains element symbols in the 6th line, the code will use that.\n 3. Failing (2), the code will check if a symbol is provided at the end\n of each coordinate.\n\n If all else fails, the code will just assign the first n elements in\n increasing atomic number, where n is the number of species, to the\n Poscar. For example, H, He, Li, .... This will ensure at least a\n unique element is assigned to each site and any analysis that does not\n require specific elemental properties should work fine.\n\n Args:\n data (str): String containing Poscar data.\n default_names ([str]): Default symbols for the POSCAR file,\n usually coming from a POTCAR in the same directory.\n read_velocities (bool): Whether to read or not velocities if they\n are present in the POSCAR. Default is True.\n\n Returns:\n Poscar object.\n \"\"\"\n # \"^\\s*$\" doesn't match lines with no whitespace\n chunks = re.split(r\"\\n\\s*\\n\", data.rstrip(), flags=re.MULTILINE)\n try:\n if chunks[0] == \"\":\n chunks.pop(0)\n chunks[0] = \"\\n\" + chunks[0]\n except IndexError:\n raise ValueError(\"Empty POSCAR\")\n\n # Parse positions\n lines = tuple(clean_lines(chunks[0].split(\"\\n\"), False))\n comment = lines[0]\n scale = float(lines[1])\n lattice = np.array([[float(i) for i in line.split()]\n for line in lines[2:5]])\n if scale < 0:\n # In vasp, a negative scale factor is treated as a volume. 
We need\n # to translate this to a proper lattice vector scaling.\n vol = abs(det(lattice))\n lattice *= (-scale / vol) ** (1 / 3)\n else:\n lattice *= scale\n\n vasp5_symbols = False\n try:\n natoms = [int(i) for i in lines[5].split()]\n ipos = 6\n except ValueError:\n vasp5_symbols = True\n symbols = lines[5].split()\n\n \"\"\"\n Atoms and number of atoms in POSCAR written with vasp appear on \n multiple lines when atoms of the same type are not grouped together \n and more than 20 groups are then defined ...\n \n Example :\n \n Cr16 Fe35 Ni2\n 1.00000000000000\n 8.5415010000000002 -0.0077670000000000 -0.0007960000000000\n -0.0077730000000000 8.5224019999999996 0.0105580000000000\n -0.0007970000000000 0.0105720000000000 8.5356889999999996\n Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Cr Fe Ni Fe Cr Fe Cr\n Fe Ni Fe Cr Fe\n 1 1 2 4 2 1 1 1 2 1 1 1 4 1 1 1 5 3 6 1\n 2 1 3 2 5\n Direct\n ...\n \"\"\"\n nlines_symbols = 1\n for nlines_symbols in range(1, 11):\n try:\n int(lines[5+nlines_symbols].split()[0])\n break\n except ValueError:\n pass\n for iline_symbols in range(6, 5+nlines_symbols):\n symbols.extend(lines[iline_symbols].split())\n natoms = []\n iline_natoms_start = 5+nlines_symbols\n for iline_natoms in range(iline_natoms_start,\n iline_natoms_start+nlines_symbols):\n natoms.extend([int(i) for i in lines[iline_natoms].split()])\n atomic_symbols = list()\n for i in range(len(natoms)):\n atomic_symbols.extend([symbols[i]] * natoms[i])\n ipos = 5+2*nlines_symbols\n\n postype = lines[ipos].split()[0]\n\n sdynamics = False\n # Selective dynamics\n if postype[0] in \"sS\":\n sdynamics = True\n ipos += 1\n postype = lines[ipos].split()[0]\n\n cart = postype[0] in \"cCkK\"\n nsites = sum(natoms)\n\n # If default_names is specified (usually coming from a POTCAR), use\n # them. This is in line with Vasp\"s parsing order that the POTCAR\n # specified is the default used.\n if default_names:\n try:\n atomic_symbols = []\n for i in range(len(natoms)):\n atomic_symbols.extend([default_names[i]] * natoms[i])\n vasp5_symbols = True\n except IndexError:\n pass\n\n if not vasp5_symbols:\n ind = 3 if not sdynamics else 6\n try:\n # Check if names are appended at the end of the coordinates.\n atomic_symbols = [l.split()[ind]\n for l in lines[ipos + 1:ipos + 1 + nsites]]\n # Ensure symbols are valid elements\n if not all([Element.is_valid_symbol(sym)\n for sym in atomic_symbols]):\n raise ValueError(\"Non-valid symbols detected.\")\n vasp5_symbols = True\n except (ValueError, IndexError):\n # Defaulting to false names.\n atomic_symbols = []\n for i in range(len(natoms)):\n sym = Element.from_Z(i + 1).symbol\n atomic_symbols.extend([sym] * natoms[i])\n warnings.warn(\"Elements in POSCAR cannot be determined. 
\"\n \"Defaulting to false names %s.\" %\n \" \".join(atomic_symbols))\n\n # read the atomic coordinates\n coords = []\n selective_dynamics = list() if sdynamics else None\n for i in range(nsites):\n toks = lines[ipos + 1 + i].split()\n crd_scale = scale if cart else 1\n coords.append([float(j) * crd_scale for j in toks[:3]])\n if sdynamics:\n selective_dynamics.append([tok.upper()[0] == \"T\"\n for tok in toks[3:6]])\n\n struct = Structure(lattice, atomic_symbols, coords,\n to_unit_cell=False, validate_proximity=False,\n coords_are_cartesian=cart)\n\n if read_velocities:\n # Parse velocities if any\n velocities = []\n if len(chunks) > 1:\n for line in chunks[1].strip().split(\"\\n\"):\n velocities.append([float(tok) for tok in line.split()])\n\n # Parse the predictor-corrector data\n predictor_corrector = []\n predictor_corrector_preamble = None\n\n if len(chunks) > 2:\n lines = chunks[2].strip().split(\"\\n\")\n # There are 3 sets of 3xN Predictor corrector parameters\n # So can't be stored as a single set of \"site_property\"\n\n # First line in chunk is a key in CONTCAR\n # Second line is POTIM\n # Third line is the thermostat parameters\n predictor_corrector_preamble = (lines[0] + \"\\n\" + lines[1]\n + \"\\n\" + lines[2])\n # Rest is three sets of parameters, each set contains\n # x, y, z predictor-corrector parameters for every atom in orde\n lines = lines[3:]\n for st in range(nsites):\n d1 = [float(tok) for tok in lines[st].split()]\n d2 = [float(tok) for tok in lines[st+nsites].split()]\n d3 = [float(tok) for tok in lines[st+2*nsites].split()]\n predictor_corrector.append([d1,d2,d3])\n else:\n velocities = None\n predictor_corrector = None\n predictor_corrector_preamble = None\n\n return Poscar(struct, comment, selective_dynamics, vasp5_symbols,\n velocities=velocities,\n predictor_corrector=predictor_corrector,\n predictor_corrector_preamble=predictor_corrector_preamble)\n\n def get_string(self, direct=True, vasp4_compatible=False,\n significant_figures=6):\n \"\"\"\n Returns a string to be written as a POSCAR file. By default, site\n symbols are written, which means compatibility is for vasp >= 5.\n\n Args:\n direct (bool): Whether coordinates are output in direct or\n cartesian. Defaults to True.\n vasp4_compatible (bool): Set to True to omit site symbols on 6th\n line to maintain backward vasp 4.x compatibility. Defaults\n to False.\n significant_figures (int): No. of significant figures to\n output all quantities. Defaults to 6. Note that positions are\n output in fixed point, while velocities are output in\n scientific format.\n\n Returns:\n String representation of POSCAR.\n \"\"\"\n\n # This corrects for VASP really annoying bug of crashing on lattices\n # which have triple product < 0. 
We will just invert the lattice\n # vectors.\n latt = self.structure.lattice\n if np.linalg.det(latt.matrix) < 0:\n latt = Lattice(-latt.matrix)\n\n format_str = \"{{:.{0}f}}\".format(significant_figures)\n lines = [self.comment, \"1.0\"]\n for v in latt.matrix:\n lines.append(\" \".join([format_str.format(c) for c in v]))\n\n if self.true_names and not vasp4_compatible:\n lines.append(\" \".join(self.site_symbols))\n lines.append(\" \".join([str(x) for x in self.natoms]))\n if self.selective_dynamics:\n lines.append(\"Selective dynamics\")\n lines.append(\"direct\" if direct else \"cartesian\")\n\n selective_dynamics = self.selective_dynamics\n for (i, site) in enumerate(self.structure):\n coords = site.frac_coords if direct else site.coords\n line = \" \".join([format_str.format(c) for c in coords])\n if selective_dynamics is not None:\n sd = [\"T\" if j else \"F\" for j in selective_dynamics[i]]\n line += \" %s %s %s\" % (sd[0], sd[1], sd[2])\n line += \" \" + site.species_string\n lines.append(line)\n\n if self.velocities:\n try:\n lines.append(\"\")\n for v in self.velocities:\n lines.append(\" \".join([format_str.format(i) for i in v]))\n except:\n warnings.warn(\"Velocities are missing or corrupted.\")\n\n if self.predictor_corrector:\n lines.append(\"\")\n if self.predictor_corrector_preamble:\n lines.append(self.predictor_corrector_preamble)\n pred = np.array(self.predictor_corrector)\n for col in range(3):\n for z in pred[:,col]:\n lines.append(\" \".join([format_str.format(i) for i in z]))\n else:\n warnings.warn(\n \"Preamble information missing or corrupt. \" \n \"Writing Poscar with no predictor corrector data.\")\n\n return \"\\n\".join(lines) + \"\\n\"\n\n def __repr__(self):\n return self.get_string()\n\n def __str__(self):\n \"\"\"\n String representation of Poscar file.\n \"\"\"\n return self.get_string()\n\n def write_file(self, filename, **kwargs):\n \"\"\"\n Writes POSCAR to a file. The supported kwargs are the same as those for\n the Poscar.get_string method and are passed through directly.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(self.get_string(**kwargs))\n\n def as_dict(self):\n return {\"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__,\n \"structure\": self.structure.as_dict(),\n \"true_names\": self.true_names,\n \"selective_dynamics\": np.array(\n self.selective_dynamics).tolist(),\n \"velocities\": self.velocities,\n \"predictor_corrector\": self.predictor_corrector,\n \"comment\": self.comment}\n\n @classmethod\n def from_dict(cls, d):\n return Poscar(Structure.from_dict(d[\"structure\"]),\n comment=d[\"comment\"],\n selective_dynamics=d[\"selective_dynamics\"],\n true_names=d[\"true_names\"],\n velocities=d.get(\"velocities\", None),\n predictor_corrector=d.get(\"predictor_corrector\", None))\n\n def set_temperature(self, temperature):\n \"\"\"\n Initializes the velocities based on Maxwell-Boltzmann distribution.\n Removes linear, but not angular drift (same as VASP)\n\n Scales the energies to the exact temperature (microcanonical ensemble)\n Velocities are given in A/fs. 
This is the vasp default when\n direct/cartesian is not specified (even when positions are given in\n direct coordinates)\n\n Overwrites imported velocities, if any.\n\n Args:\n temperature (float): Temperature in Kelvin.\n \"\"\"\n # mean 0 variance 1\n velocities = np.random.randn(len(self.structure), 3)\n\n # in AMU, (N,1) array\n atomic_masses = np.array([site.specie.atomic_mass.to(\"kg\")\n for site in self.structure])\n dof = 3 * len(self.structure) - 3\n\n # scale velocities due to atomic masses\n # mean 0 std proportional to sqrt(1/m)\n velocities /= atomic_masses[:, np.newaxis] ** (1 / 2)\n\n # remove linear drift (net momentum)\n velocities -= np.average(atomic_masses[:, np.newaxis] * velocities,\n axis=0) / np.average(atomic_masses)\n\n # scale velocities to get correct temperature\n energy = np.sum(1 / 2 * atomic_masses *\n np.sum(velocities ** 2, axis=1))\n scale = (temperature * dof / (2 * energy / const.k)) ** (1 / 2)\n\n velocities *= scale * 1e-5 # these are in A/fs\n\n self.temperature = temperature\n try:\n del self.structure.site_properties[\"selective_dynamics\"]\n except KeyError:\n pass\n\n try:\n del self.structure.site_properties[\"predictor_corrector\"]\n except KeyError:\n pass\n # returns as a list of lists to be consistent with the other\n # initializations\n\n self.structure.add_site_property(\"velocities\", velocities.tolist())\n\n\nclass Incar(dict, MSONable):\n \"\"\"\n INCAR object for reading and writing INCAR files. Essentially consists of\n a dictionary with some helper functions\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"\n Creates an Incar object.\n\n Args:\n params (dict): A set of input parameters as a dictionary.\n \"\"\"\n super(Incar, self).__init__()\n if params:\n\n # if Incar contains vector-like magmoms given as a list\n # of floats, convert to a list of lists\n if (params.get(\"MAGMOM\") and isinstance(params[\"MAGMOM\"][0], (int, float))) \\\n and (params.get(\"LSORBIT\") or params.get(\"LNONCOLLINEAR\")):\n val = []\n for i in range(len(params[\"MAGMOM\"])//3):\n val.append(params[\"MAGMOM\"][i*3:(i+1)*3])\n params[\"MAGMOM\"] = val\n\n self.update(params)\n\n def __setitem__(self, key, val):\n \"\"\"\n Add parameter-val pair to Incar. Warns if parameter is not in list of\n valid INCAR tags. Also cleans the parameter and val by stripping\n leading and trailing white spaces.\n \"\"\"\n super(Incar, self).__setitem__(\n key.strip(), Incar.proc_val(key.strip(), val.strip())\n if isinstance(val, str) else val)\n\n def as_dict(self):\n d = dict(self)\n d[\"@module\"] = self.__class__.__module__\n d[\"@class\"] = self.__class__.__name__\n return d\n\n @classmethod\n def from_dict(cls, d):\n if d.get(\"MAGMOM\") and isinstance(d[\"MAGMOM\"][0], dict):\n d[\"MAGMOM\"] = [Magmom.from_dict(m) for m in d[\"MAGMOM\"]]\n return Incar({k: v for k, v in d.items() if k not in (\"@module\",\n \"@class\")})\n\n def get_string(self, sort_keys=False, pretty=False):\n \"\"\"\n Returns a string representation of the INCAR. The reason why this\n method is different from the __str__ method is to provide options for\n pretty printing.\n\n Args:\n sort_keys (bool): Set to True to sort the INCAR parameters\n alphabetically. Defaults to False.\n pretty (bool): Set to True for pretty aligned output. 
Defaults\n to False.\n \"\"\"\n keys = self.keys()\n if sort_keys:\n keys = sorted(keys)\n lines = []\n for k in keys:\n if k == \"MAGMOM\" and isinstance(self[k], list):\n value = []\n\n if (isinstance(self[k][0], list) or isinstance(self[k][0], Magmom)) and \\\n (self.get(\"LSORBIT\") or self.get(\"LNONCOLLINEAR\")):\n value.append(\" \".join(str(i) for j in self[k] for i in j))\n elif self.get(\"LSORBIT\") or self.get(\"LNONCOLLINEAR\"):\n for m, g in itertools.groupby(self[k]):\n value.append(\"3*{}*{}\".format(len(tuple(g)), m))\n else:\n # float() to ensure backwards compatibility between\n # float magmoms and Magmom objects\n for m, g in itertools.groupby(self[k], lambda x: float(x)):\n value.append(\"{}*{}\".format(len(tuple(g)), m))\n\n lines.append([k, \" \".join(value)])\n elif isinstance(self[k], list):\n lines.append([k, \" \".join([str(i) for i in self[k]])])\n else:\n lines.append([k, self[k]])\n\n if pretty:\n return str(tabulate([[l[0], \"=\", l[1]] for l in lines],\n tablefmt=\"plain\"))\n else:\n return str_delimited(lines, None, \" = \") + \"\\n\"\n\n def __str__(self):\n return self.get_string(sort_keys=True, pretty=False)\n\n def write_file(self, filename):\n \"\"\"\n Write Incar to a file.\n\n Args:\n filename (str): filename to write to.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(self.__str__())\n\n @staticmethod\n def from_file(filename):\n \"\"\"\n Reads an Incar object from a file.\n\n Args:\n filename (str): Filename for file\n\n Returns:\n Incar object\n \"\"\"\n with zopen(filename, \"rt\") as f:\n return Incar.from_string(f.read())\n\n @staticmethod\n def from_string(string):\n \"\"\"\n Reads an Incar object from a string.\n\n Args:\n string (str): Incar string\n\n Returns:\n Incar object\n \"\"\"\n lines = list(clean_lines(string.splitlines()))\n params = {}\n for line in lines:\n for sline in line.split(';'):\n m = re.match(r'(\\w+)\\s*=\\s*(.*)', sline.strip())\n if m:\n key = m.group(1).strip()\n val = m.group(2).strip()\n val = Incar.proc_val(key, val)\n params[key] = val\n return Incar(params)\n\n @staticmethod\n def proc_val(key, val):\n \"\"\"\n Static helper method to convert INCAR parameters to proper types, e.g.,\n integers, floats, lists, etc.\n\n Args:\n key: INCAR parameter key\n val: Actual value of INCAR parameter.\n \"\"\"\n list_keys = (\"LDAUU\", \"LDAUL\", \"LDAUJ\", \"MAGMOM\", \"DIPOL\",\n \"LANGEVIN_GAMMA\", \"QUAD_EFG\", \"EINT\")\n bool_keys = (\"LDAU\", \"LWAVE\", \"LSCALU\", \"LCHARG\", \"LPLANE\", \"LUSE_VDW\",\n \"LHFCALC\", \"ADDGRID\", \"LSORBIT\", \"LNONCOLLINEAR\")\n float_keys = (\"EDIFF\", \"SIGMA\", \"TIME\", \"ENCUTFOCK\", \"HFSCREEN\",\n \"POTIM\", \"EDIFFG\", \"AGGAC\", \"PARAM1\", \"PARAM2\")\n int_keys = (\"NSW\", \"NBANDS\", \"NELMIN\", \"ISIF\", \"IBRION\", \"ISPIN\",\n \"ICHARG\", \"NELM\", \"ISMEAR\", \"NPAR\", \"LDAUPRINT\", \"LMAXMIX\",\n \"ENCUT\", \"NSIM\", \"NKRED\", \"NUPDOWN\", \"ISPIND\", \"LDAUTYPE\",\n \"IVDW\")\n\n def smart_int_or_float(numstr):\n if numstr.find(\".\") != -1 or numstr.lower().find(\"e\") != -1:\n return float(numstr)\n else:\n return int(numstr)\n\n try:\n if key in list_keys:\n output = []\n toks = re.findall(\n r\"(-?\\d+\\.?\\d*)\\*?(-?\\d+\\.?\\d*)?\\*?(-?\\d+\\.?\\d*)?\", val)\n for tok in toks:\n if tok[2] and \"3\" in tok[0]:\n output.extend(\n [smart_int_or_float(tok[2])] * int(tok[0])\n * int(tok[1]))\n elif tok[1]:\n output.extend([smart_int_or_float(tok[1])] *\n int(tok[0]))\n else:\n output.append(smart_int_or_float(tok[0]))\n return output\n if key in 
bool_keys:\n m = re.match(r\"^\\.?([T|F|t|f])[A-Za-z]*\\.?\", val)\n if m:\n if m.group(1) == \"T\" or m.group(1) == \"t\":\n return True\n else:\n return False\n raise ValueError(key + \" should be a boolean type!\")\n\n if key in float_keys:\n return float(re.search(r\"^-?\\d*\\.?\\d*[e|E]?-?\\d*\", val).group(0))\n\n if key in int_keys:\n return int(re.match(r\"^-?[0-9]+\", val).group(0))\n\n except ValueError:\n pass\n\n # Not in standard keys. We will try a hierarchy of conversions.\n try:\n val = int(val)\n return val\n except ValueError:\n pass\n\n try:\n val = float(val)\n return val\n except ValueError:\n pass\n\n if \"true\" in val.lower():\n return True\n\n if \"false\" in val.lower():\n return False\n\n return val.strip().capitalize()\n\n def diff(self, other):\n \"\"\"\n Diff function for Incar. Compares two Incars and indicates which\n parameters are the same and which are not. Useful for checking whether\n two runs were done using the same parameters.\n\n Args:\n other (Incar): The other Incar object to compare to.\n\n Returns:\n Dict of the following format:\n {\"Same\" : parameters_that_are_the_same,\n \"Different\": parameters_that_are_different}\n Note that the parameters are return as full dictionaries of values.\n E.g. {\"ISIF\":3}\n \"\"\"\n similar_param = {}\n different_param = {}\n for k1, v1 in self.items():\n if k1 not in other:\n different_param[k1] = {\"INCAR1\": v1, \"INCAR2\": None}\n elif v1 != other[k1]:\n different_param[k1] = {\"INCAR1\": v1, \"INCAR2\": other[k1]}\n else:\n similar_param[k1] = v1\n for k2, v2 in other.items():\n if k2 not in similar_param and k2 not in different_param:\n if k2 not in self:\n different_param[k2] = {\"INCAR1\": None, \"INCAR2\": v2}\n return {\"Same\": similar_param, \"Different\": different_param}\n\n def __add__(self, other):\n \"\"\"\n Add all the values of another INCAR object to this object.\n Facilitates the use of \"standard\" INCARs.\n \"\"\"\n params = {k: v for k, v in self.items()}\n for k, v in other.items():\n if k in self and v != self[k]:\n raise ValueError(\"Incars have conflicting values!\")\n else:\n params[k] = v\n return Incar(params)\n\n\nclass Kpoints_supported_modes(Enum):\n Automatic = 0\n Gamma = 1\n Monkhorst = 2\n Line_mode = 3\n Cartesian = 4\n Reciprocal = 5\n\n def __str__(self):\n return self.name\n\n @staticmethod\n def from_string(s):\n c = s.lower()[0]\n for m in Kpoints_supported_modes:\n if m.name.lower()[0] == c:\n return m\n raise ValueError(\"Can't interprete Kpoint mode %s\" % s)\n\n\nclass Kpoints(MSONable):\n \"\"\"\n KPOINT reader/writer.\n \"\"\"\n supported_modes = Kpoints_supported_modes\n\n def __init__(self, comment=\"Default gamma\", num_kpts=0,\n style=supported_modes.Gamma,\n kpts=((1, 1, 1),), kpts_shift=(0, 0, 0),\n kpts_weights=None, coord_type=None, labels=None,\n tet_number=0, tet_weight=0, tet_connections=None):\n \"\"\"\n Highly flexible constructor for Kpoints object. The flexibility comes\n at the cost of usability and in general, it is recommended that you use\n the default constructor only if you know exactly what you are doing and\n requires the flexibility. For most usage cases, the three automatic\n schemes can be constructed far more easily using the convenience static\n constructors (automatic, gamma_automatic, monkhorst_automatic) and it\n is recommended that you use those.\n\n Args:\n comment (str): String comment for Kpoints\n num_kpts: Following VASP method of defining the KPOINTS file, this\n parameter is the number of kpoints specified. 
If set to 0\n (or negative), VASP automatically generates the KPOINTS.\n style: Style for generating KPOINTS. Use one of the\n Kpoints.supported_modes enum types.\n kpts (2D array): 2D array of kpoints. Even when only a single\n specification is required, e.g. in the automatic scheme,\n the kpts should still be specified as a 2D array. e.g.,\n [[20]] or [[2,2,2]].\n kpts_shift (3x1 array): Shift for Kpoints.\n kpts_weights: Optional weights for kpoints. Weights should be\n integers. For explicit kpoints.\n coord_type: In line-mode, this variable specifies whether the\n Kpoints were given in Cartesian or Reciprocal coordinates.\n labels: In line-mode, this should provide a list of labels for\n each kpt. It is optional in explicit kpoint mode as comments for\n k-points.\n tet_number: For explicit kpoints, specifies the number of\n tetrahedrons for the tetrahedron method.\n tet_weight: For explicit kpoints, specifies the weight for each\n tetrahedron for the tetrahedron method.\n tet_connections: For explicit kpoints, specifies the connections\n of the tetrahedrons for the tetrahedron method.\n Format is a list of tuples, [ (sym_weight, [tet_vertices]),\n ...]\n\n The default behavior of the constructor is for a Gamma centered,\n 1x1x1 KPOINTS with no shift.\n \"\"\"\n if num_kpts > 0 and (not labels) and (not kpts_weights):\n raise ValueError(\"For explicit or line-mode kpoints, either the \"\n \"labels or kpts_weights must be specified.\")\n\n self.comment = comment\n self.num_kpts = num_kpts\n self.kpts = kpts\n self.style = style\n self.coord_type = coord_type\n self.kpts_weights = kpts_weights\n self.kpts_shift = kpts_shift\n self.labels = labels\n self.tet_number = tet_number\n self.tet_weight = tet_weight\n self.tet_connections = tet_connections\n\n @property\n def style(self):\n return self._style\n\n @style.setter\n def style(self, style):\n if isinstance(style, str):\n style = Kpoints.supported_modes.from_string(style)\n\n if style in (Kpoints.supported_modes.Automatic,\n Kpoints.supported_modes.Gamma,\n Kpoints.supported_modes.Monkhorst) and len(self.kpts) > 1:\n raise ValueError(\"For fully automatic or automatic gamma or monk \"\n \"kpoints, only a single line for the number of \"\n \"divisions is allowed.\")\n\n self._style = style\n\n @staticmethod\n def automatic(subdivisions):\n \"\"\"\n Convenient static constructor for a fully automatic Kpoint grid, with\n gamma centered Monkhorst-Pack grids and the number of subdivisions\n along each reciprocal lattice vector determined by the scheme in the\n VASP manual.\n\n Args:\n subdivisions: Parameter determining number of subdivisions along\n each reciprocal lattice vector.\n\n Returns:\n Kpoints object\n \"\"\"\n return Kpoints(\"Fully automatic kpoint scheme\", 0,\n style=Kpoints.supported_modes.Automatic,\n kpts=[[subdivisions]])\n\n @staticmethod\n def gamma_automatic(kpts=(1, 1, 1), shift=(0, 0, 0)):\n \"\"\"\n Convenient static constructor for an automatic Gamma centered Kpoint\n grid.\n\n Args:\n kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice\n vectors. Defaults to (1,1,1)\n shift: Shift to be applied to the kpoints. 
Defaults to (0,0,0).\n\n Returns:\n Kpoints object\n \"\"\"\n return Kpoints(\"Automatic kpoint scheme\", 0,\n Kpoints.supported_modes.Gamma, kpts=[kpts],\n kpts_shift=shift)\n\n @staticmethod\n def monkhorst_automatic(kpts=(2, 2, 2), shift=(0, 0, 0)):\n \"\"\"\n Convenient static constructor for an automatic Monkhorst pack Kpoint\n grid.\n\n Args:\n kpts: Subdivisions N_1, N_2 and N_3 along reciprocal lattice\n vectors. Defaults to (2,2,2)\n shift: Shift to be applied to the kpoints. Defaults to (0,0,0).\n\n Returns:\n Kpoints object\n \"\"\"\n return Kpoints(\"Automatic kpoint scheme\", 0,\n Kpoints.supported_modes.Monkhorst, kpts=[kpts],\n kpts_shift=shift)\n\n @staticmethod\n def automatic_density(structure, kppa, force_gamma=False):\n \"\"\"\n Returns an automatic Kpoint object based on a structure and a kpoint\n density. Uses Gamma centered meshes for hexagonal cells and\n Monkhorst-Pack grids otherwise.\n\n Algorithm:\n Uses a simple approach scaling the number of divisions along each\n reciprocal lattice vector proportional to its length.\n\n Args:\n structure (Structure): Input structure\n kppa (int): Grid density\n force_gamma (bool): Force a gamma centered mesh (default is to\n use gamma only for hexagonal cells or odd meshes)\n\n Returns:\n Kpoints\n \"\"\"\n comment = \"pymatgen 4.7.6+ generated KPOINTS with grid density = \" + \\\n \"%.0f / atom\" % kppa\n if math.fabs((math.floor(kppa ** (1 / 3) + 0.5)) ** 3 - kppa) < 1:\n kppa += kppa * 0.01\n latt = structure.lattice\n lengths = latt.abc\n ngrid = kppa / structure.num_sites\n mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)\n\n num_div = [int(math.floor(max(mult / l, 1))) for l in lengths]\n\n is_hexagonal = latt.is_hexagonal()\n\n has_odd = any([i % 2 == 1 for i in num_div])\n if has_odd or is_hexagonal or force_gamma:\n style = Kpoints.supported_modes.Gamma\n else:\n style = Kpoints.supported_modes.Monkhorst\n\n return Kpoints(comment, 0, style, [num_div], [0, 0, 0])\n\n @staticmethod\n def automatic_gamma_density(structure, kppa):\n \"\"\"\n Returns an automatic Kpoint object based on a structure and a kpoint\n density. Uses Gamma centered meshes always. 
For GW.\n\n Algorithm:\n Uses a simple approach scaling the number of divisions along each\n reciprocal lattice vector proportional to its length.\n\n Args:\n structure:\n Input structure\n kppa:\n Grid density\n \"\"\"\n\n latt = structure.lattice\n lengths = latt.abc\n ngrid = kppa / structure.num_sites\n\n mult = (ngrid * lengths[0] * lengths[1] * lengths[2]) ** (1 / 3)\n num_div = [int(round(mult / l)) for l in lengths]\n\n # ensure that numDiv[i] > 0\n num_div = [i if i > 0 else 1 for i in num_div]\n\n # VASP documentation recommends to use even grids for n <= 8 and odd\n # grids for n > 8.\n num_div = [i + i % 2 if i <= 8 else i - i % 2 + 1 for i in num_div]\n\n style = Kpoints.supported_modes.Gamma\n\n comment = \"pymatgen 4.7.6+ generated KPOINTS with grid density = \" + \\\n \"{} / atom\".format(kppa)\n num_kpts = 0\n return Kpoints(comment, num_kpts, style, [num_div], [0, 0, 0])\n\n @staticmethod\n def automatic_density_by_vol(structure, kppvol, force_gamma=False):\n \"\"\"\n Returns an automatic Kpoint object based on a structure and a kpoint\n density per inverse Angstrom^3 of reciprocal cell.\n\n Algorithm:\n Same as automatic_density()\n\n Args:\n structure (Structure): Input structure\n kppvol (int): Grid density per Angstrom^(-3) of reciprocal cell\n force_gamma (bool): Force a gamma centered mesh\n\n Returns:\n Kpoints\n \"\"\"\n vol = structure.lattice.reciprocal_lattice.volume\n kppa = kppvol * vol * structure.num_sites\n return Kpoints.automatic_density(structure, kppa,\n force_gamma=force_gamma)\n\n @staticmethod\n def automatic_linemode(divisions, ibz):\n \"\"\"\n Convenient static constructor for a KPOINTS in mode line_mode.\n gamma centered Monkhorst-Pack grids and the number of subdivisions\n along each reciprocal lattice vector determined by the scheme in the\n VASP manual.\n\n Args:\n divisions: Parameter determining the number of k-points along each\n hight symetry lines.\n ibz: HighSymmKpath object (pymatgen.symmetry.bandstructure)\n\n Returns:\n Kpoints object\n \"\"\"\n kpoints = list()\n labels = list()\n for path in ibz.kpath[\"path\"]:\n kpoints.append(ibz.kpath[\"kpoints\"][path[0]])\n labels.append(path[0])\n for i in range(1, len(path) - 1):\n kpoints.append(ibz.kpath[\"kpoints\"][path[i]])\n labels.append(path[i])\n kpoints.append(ibz.kpath[\"kpoints\"][path[i]])\n labels.append(path[i])\n\n kpoints.append(ibz.kpath[\"kpoints\"][path[-1]])\n labels.append(path[-1])\n\n return Kpoints(\"Line_mode KPOINTS file\",\n style=Kpoints.supported_modes.Line_mode,\n coord_type=\"Reciprocal\",\n kpts=kpoints,\n labels=labels,\n num_kpts=int(divisions))\n\n @staticmethod\n def from_file(filename):\n \"\"\"\n Reads a Kpoints object from a KPOINTS file.\n\n Args:\n filename (str): filename to read from.\n\n Returns:\n Kpoints object\n \"\"\"\n with zopen(filename, \"rt\") as f:\n return Kpoints.from_string(f.read())\n\n @staticmethod\n def from_string(string):\n \"\"\"\n Reads a Kpoints object from a KPOINTS string.\n\n Args:\n string (str): KPOINTS string.\n\n Returns:\n Kpoints object\n \"\"\"\n lines = [line.strip() for line in string.splitlines()]\n\n comment = lines[0]\n num_kpts = int(lines[1].split()[0].strip())\n style = lines[2].lower()[0]\n\n # Fully automatic KPOINTS\n if style == \"a\":\n return Kpoints.automatic(int(lines[3]))\n\n coord_pattern = re.compile(r'^\\s*([\\d+.\\-Ee]+)\\s+([\\d+.\\-Ee]+)\\s+'\n r'([\\d+.\\-Ee]+)')\n\n # Automatic gamma and Monk KPOINTS, with optional shift\n if style == \"g\" or style == \"m\":\n kpts = [int(i) for i in 
lines[3].split()]\n kpts_shift = (0, 0, 0)\n if len(lines) > 4 and coord_pattern.match(lines[4]):\n try:\n kpts_shift = [float(i) for i in lines[4].split()]\n except ValueError:\n pass\n return Kpoints.gamma_automatic(kpts, kpts_shift) if style == \"g\" \\\n else Kpoints.monkhorst_automatic(kpts, kpts_shift)\n\n # Automatic kpoints with basis\n if num_kpts <= 0:\n style = Kpoints.supported_modes.Cartesian if style in \"ck\" \\\n else Kpoints.supported_modes.Reciprocal\n kpts = [[float(j) for j in lines[i].split()] for i in range(3, 6)]\n kpts_shift = [float(i) for i in lines[6].split()]\n return Kpoints(comment=comment, num_kpts=num_kpts, style=style,\n kpts=kpts, kpts_shift=kpts_shift)\n\n # Line-mode KPOINTS, usually used with band structures\n if style == \"l\":\n coord_type = \"Cartesian\" if lines[3].lower()[0] in \"ck\" \\\n else \"Reciprocal\"\n style = Kpoints.supported_modes.Line_mode\n kpts = []\n labels = []\n patt = re.compile(r'([e0-9.\\-]+)\\s+([e0-9.\\-]+)\\s+([e0-9.\\-]+)'\n r'\\s*!*\\s*(.*)')\n for i in range(4, len(lines)):\n line = lines[i]\n m = patt.match(line)\n if m:\n kpts.append([float(m.group(1)), float(m.group(2)),\n float(m.group(3))])\n labels.append(m.group(4).strip())\n return Kpoints(comment=comment, num_kpts=num_kpts, style=style,\n kpts=kpts, coord_type=coord_type, labels=labels)\n\n # Assume explicit KPOINTS if all else fails.\n style = Kpoints.supported_modes.Cartesian if style in \"ck\" \\\n else Kpoints.supported_modes.Reciprocal\n kpts = []\n kpts_weights = []\n labels = []\n tet_number = 0\n tet_weight = 0\n tet_connections = None\n\n for i in range(3, 3 + num_kpts):\n toks = lines[i].split()\n kpts.append([float(j) for j in toks[0:3]])\n kpts_weights.append(float(toks[3]))\n if len(toks) > 4:\n labels.append(toks[4])\n else:\n labels.append(None)\n try:\n # Deal with tetrahedron method\n if lines[3 + num_kpts].strip().lower()[0] == \"t\":\n toks = lines[4 + num_kpts].split()\n tet_number = int(toks[0])\n tet_weight = float(toks[1])\n tet_connections = []\n for i in range(5 + num_kpts, 5 + num_kpts + tet_number):\n toks = lines[i].split()\n tet_connections.append((int(toks[0]),\n [int(toks[j])\n for j in range(1, 5)]))\n except IndexError:\n pass\n\n return Kpoints(comment=comment, num_kpts=num_kpts,\n style=Kpoints.supported_modes[str(style)],\n kpts=kpts, kpts_weights=kpts_weights,\n tet_number=tet_number, tet_weight=tet_weight,\n tet_connections=tet_connections, labels=labels)\n\n def write_file(self, filename):\n \"\"\"\n Write Kpoints to a file.\n\n Args:\n filename (str): Filename to write to.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(self.__str__())\n\n def __repr__(self):\n return self.__str__()\n\n def __str__(self):\n lines = [self.comment, str(self.num_kpts), self.style.name]\n style = self.style.name.lower()[0]\n if style == \"l\":\n lines.append(self.coord_type)\n for i in range(len(self.kpts)):\n lines.append(\" \".join([str(x) for x in self.kpts[i]]))\n if style == \"l\":\n lines[-1] += \" ! 
\" + self.labels[i]\n if i % 2 == 1:\n lines[-1] += \"\\n\"\n elif self.num_kpts > 0:\n if self.labels is not None:\n lines[-1] += \" %i %s\" % (self.kpts_weights[i],\n self.labels[i])\n else:\n lines[-1] += \" %i\" % (self.kpts_weights[i])\n\n # Print tetrahedron parameters if the number of tetrahedrons > 0\n if style not in \"lagm\" and self.tet_number > 0:\n lines.append(\"Tetrahedron\")\n lines.append(\"%d %f\" % (self.tet_number, self.tet_weight))\n for sym_weight, vertices in self.tet_connections:\n lines.append(\"%d %d %d %d %d\" % (sym_weight, vertices[0],\n vertices[1], vertices[2],\n vertices[3]))\n\n # Print shifts for automatic kpoints types if not zero.\n if self.num_kpts <= 0 and tuple(self.kpts_shift) != (0, 0, 0):\n lines.append(\" \".join([str(x) for x in self.kpts_shift]))\n return \"\\n\".join(lines) + \"\\n\"\n\n def as_dict(self):\n \"\"\"json friendly dict representation of Kpoints\"\"\"\n d = {\"comment\": self.comment, \"nkpoints\": self.num_kpts,\n \"generation_style\": self.style.name, \"kpoints\": self.kpts,\n \"usershift\": self.kpts_shift,\n \"kpts_weights\": self.kpts_weights, \"coord_type\": self.coord_type,\n \"labels\": self.labels, \"tet_number\": self.tet_number,\n \"tet_weight\": self.tet_weight,\n \"tet_connections\": self.tet_connections}\n\n optional_paras = [\"genvec1\", \"genvec2\", \"genvec3\", \"shift\"]\n for para in optional_paras:\n if para in self.__dict__:\n d[para] = self.__dict__[para]\n d[\"@module\"] = self.__class__.__module__\n d[\"@class\"] = self.__class__.__name__\n return d\n\n @classmethod\n def from_dict(cls, d):\n comment = d.get(\"comment\", \"\")\n generation_style = d.get(\"generation_style\")\n kpts = d.get(\"kpoints\", [[1, 1, 1]])\n kpts_shift = d.get(\"usershift\", [0, 0, 0])\n num_kpts = d.get(\"nkpoints\", 0)\n return cls(comment=comment, kpts=kpts, style=generation_style,\n kpts_shift=kpts_shift, num_kpts=num_kpts,\n kpts_weights=d.get(\"kpts_weights\"),\n coord_type=d.get(\"coord_type\"),\n labels=d.get(\"labels\"), tet_number=d.get(\"tet_number\", 0),\n tet_weight=d.get(\"tet_weight\", 0),\n tet_connections=d.get(\"tet_connections\"))\n\n\ndef parse_string(s):\n return \"{}\".format(s.strip())\n\n\ndef parse_bool(s):\n m = re.match(r\"^\\.?([TFtf])[A-Za-z]*\\.?\", s)\n if m:\n if m.group(1) == \"T\" or m.group(1) == \"t\":\n return True\n else:\n return False\n raise ValueError(s + \" should be a boolean type!\")\n\n\ndef parse_float(s):\n return float(re.search(r\"^-?\\d*\\.?\\d*[eE]?-?\\d*\", s).group(0))\n\n\ndef parse_int(s):\n return int(re.match(r\"^-?[0-9]+\", s).group(0))\n\n\ndef parse_list(s):\n return [float(y) for y in re.split(r\"\\s+\", s.strip()) if not y.isalpha()]\n\n\nOrbital = namedtuple('Orbital', ['n', 'l', 'j', 'E', 'occ'])\nOrbitalDescription = namedtuple('OrbitalDescription',\n ['l', 'E', 'Type', \"Rcut\", \"Type2\", \"Rcut2\"])\n\n\nclass PotcarSingle:\n \"\"\"\n Object for a **single** POTCAR. The builder assumes the complete string is\n the POTCAR contains the complete untouched data in \"data\" as a string and\n a dict of keywords.\n\n Args:\n data:\n Complete and single potcar file as a string.\n\n .. attribute:: data\n\n POTCAR data as a string.\n\n .. attribute:: keywords\n\n Keywords parsed from the POTCAR as a dict. All keywords are also\n accessible as attributes in themselves. 
E.g., potcar.enmax,\n potcar.encut, etc.\n \"\"\"\n functional_dir = {\"PBE\": \"POT_GGA_PAW_PBE\",\n \"PBE_52\": \"POT_GGA_PAW_PBE_52\",\n \"PBE_54\": \"POT_GGA_PAW_PBE_54\",\n \"LDA\": \"POT_LDA_PAW\",\n \"LDA_52\": \"POT_LDA_PAW_52\",\n \"LDA_54\": \"POT_LDA_PAW_54\",\n \"PW91\": \"POT_GGA_PAW_PW91\",\n \"LDA_US\": \"POT_LDA_US\",\n \"PW91_US\": \"POT_GGA_US_PW91\"}\n\n functional_tags = {\"pe\": {\"name\": \"PBE\", \"class\": \"GGA\"},\n \"91\": {\"name\": \"PW91\", \"class\": \"GGA\"},\n \"rp\": {\"name\": \"revPBE\", \"class\": \"GGA\"},\n \"am\": {\"name\": \"AM05\", \"class\": \"GGA\"},\n \"ps\": {\"name\": \"PBEsol\", \"class\": \"GGA\"},\n \"pw\": {\"name\": \"PW86\", \"class\": \"GGA\"},\n \"lm\": {\"name\": \"Langreth-Mehl-Hu\", \"class\": \"GGA\"},\n \"pb\": {\"name\": \"Perdew-Becke\", \"class\": \"GGA\"},\n \"ca\": {\"name\": \"Perdew-Zunger81\", \"class\": \"LDA\"},\n \"hl\": {\"name\": \"Hedin-Lundquist\", \"class\": \"LDA\"},\n \"wi\": {\"name\": \"Wigner Interpolation\", \"class\": \"LDA\"}}\n\n parse_functions = {\"LULTRA\": parse_bool,\n \"LUNSCR\": parse_bool,\n \"LCOR\": parse_bool,\n \"LPAW\": parse_bool,\n \"EATOM\": parse_float,\n \"RPACOR\": parse_float,\n \"POMASS\": parse_float,\n \"ZVAL\": parse_float,\n \"RCORE\": parse_float,\n \"RWIGS\": parse_float,\n \"ENMAX\": parse_float,\n \"ENMIN\": parse_float,\n \"EMMIN\": parse_float,\n \"EAUG\": parse_float,\n \"DEXC\": parse_float,\n \"RMAX\": parse_float,\n \"RAUG\": parse_float,\n \"RDEP\": parse_float,\n \"RDEPT\": parse_float,\n \"QCUT\": parse_float,\n \"QGAM\": parse_float,\n \"RCLOC\": parse_float,\n \"IUNSCR\": parse_int,\n \"ICORE\": parse_int,\n \"NDATA\": parse_int,\n \"VRHFIN\": parse_string,\n \"LEXCH\": parse_string,\n \"TITEL\": parse_string,\n \"STEP\": parse_list,\n \"RRKJ\": parse_list,\n \"GGA\": parse_list}\n\n def __init__(self, data):\n self.data = data # raw POTCAR as a string\n\n # Vasp parses the header in vasprun.xml and this differs from the TITEL\n self.header = data.split(\"\\n\")[0].strip()\n\n search_lines = re.search(r\"(?s)(parameters from PSCTR are:\"\n r\".*?END of PSCTR-controll parameters)\",\n data).group(1)\n\n self.keywords = {}\n for key, val in re.findall(r\"(\S+)\s*=\s*(.*?)(?=;|$)\",\n search_lines, flags=re.MULTILINE):\n try:\n self.keywords[key] = self.parse_functions[key](val)\n except KeyError:\n warnings.warn(\"Ignoring unknown variable type %s\" % key)\n\n PSCTR = OrderedDict()\n\n array_search = re.compile(r\"(-*[0-9.]+)\")\n orbitals = []\n descriptions = []\n atomic_configuration = re.search(r\"Atomic configuration\s*\n?\"\n r\"(.*?)Description\", search_lines)\n if atomic_configuration:\n lines = atomic_configuration.group(1).splitlines()\n num_entries = re.search(r\"([0-9]+)\", lines[0]).group(1)\n num_entries = int(num_entries)\n PSCTR['nentries'] = num_entries\n for line in lines[1:]:\n orbit = array_search.findall(line)\n if orbit:\n orbitals.append(Orbital(int(orbit[0]),\n int(orbit[1]),\n float(orbit[2]),\n float(orbit[3]),\n float(orbit[4])))\n PSCTR['Orbitals'] = tuple(orbitals)\n\n description_string = re.search(r\"(?s)Description\s*\n\"\n r\"(.*?)Error from kinetic\"\n r\" energy argument \(eV\)\",\n search_lines)\n if description_string:\n for line in description_string.group(1).splitlines():\n description = array_search.findall(line)\n if description:\n descriptions.append(\n OrbitalDescription(\n int(description[0]), float(description[1]),\n int(description[2]), float(description[3]),\n int(description[4]) if len(description) > 4 else
None,\n float(description[5]) if len(description) > 4 else None))\n\n if descriptions:\n PSCTR['OrbitalDescriptions'] = tuple(descriptions)\n\n rrkj_kinetic_energy_string = re.search(\n r\"(?s)Error from kinetic energy argument \\(eV\\)\\s*\\n\"\n r\"(.*?)END of PSCTR-controll parameters\",\n search_lines)\n rrkj_array = []\n if rrkj_kinetic_energy_string:\n for line in rrkj_kinetic_energy_string.group(1).splitlines():\n if \"=\" not in line:\n rrkj_array += parse_list(line.strip('\\n'))\n if rrkj_array:\n PSCTR['RRKJ'] = tuple(rrkj_array)\n\n PSCTR.update(self.keywords)\n self.PSCTR = OrderedDict(sorted(PSCTR.items(), key=lambda x: x[0]))\n self.hash = self.get_potcar_hash()\n\n def __str__(self):\n return self.data + \"\\n\"\n\n @property\n def electron_configuration(self):\n if not self.nelectrons.is_integer():\n warnings.warn(\"POTCAR has non-integer charge, \"\n \"electron configuration not well-defined.\")\n return None\n el = Element.from_Z(self.atomic_no)\n full_config = el.full_electronic_structure\n nelect = self.nelectrons\n config = []\n while nelect > 0:\n e = full_config.pop(-1)\n config.append(e)\n nelect -= e[-1]\n return config\n\n def write_file(self, filename):\n with zopen(filename, \"wt\") as f:\n f.write(self.__str__())\n\n @staticmethod\n def from_file(filename):\n try:\n with zopen(filename, \"rt\") as f:\n return PotcarSingle(f.read())\n except UnicodeDecodeError:\n warnings.warn(\"POTCAR contains invalid unicode errors. \"\n \"We will attempt to read it by ignoring errors.\")\n import codecs\n with codecs.open(filename, \"r\", encoding=\"utf-8\",\n errors=\"ignore\") as f:\n return PotcarSingle(f.read())\n\n @staticmethod\n def from_symbol_and_functional(symbol, functional=None):\n if functional is None:\n functional = SETTINGS.get(\"PMG_DEFAULT_FUNCTIONAL\", \"PBE\")\n funcdir = PotcarSingle.functional_dir[functional]\n d = SETTINGS.get(\"PMG_VASP_PSP_DIR\")\n if d is None:\n raise ValueError(\n \"No POTCAR for %s with functional %s found. 
\"\n \"Please set the PMG_VASP_PSP_DIR environment in \"\n \".pmgrc.yaml, or you may need to set \"\n \"PMG_DEFAULT_FUNCTIONAL to PBE_52 or PBE_54 if you \"\n \"are using newer psps from VASP.\" % (symbol, functional))\n paths_to_try = [os.path.join(d, funcdir, \"POTCAR.{}\".format(symbol)),\n os.path.join(d, funcdir, symbol, \"POTCAR\")]\n for p in paths_to_try:\n p = os.path.expanduser(p)\n p = zpath(p)\n if os.path.exists(p):\n return PotcarSingle.from_file(p)\n raise IOError(\"You do not have the right POTCAR with functional \" +\n \"{} and label {} in your VASP_PSP_DIR\".format(functional,\n symbol))\n\n @property\n def symbol(self):\n \"\"\"\n Symbol of POTCAR, e.g., Fe_pv\n \"\"\"\n return self.keywords[\"TITEL\"].split(\" \")[1].strip()\n\n @property\n def element(self):\n \"\"\"\n Attempt to return the atomic symbol based on the VRHFIN keyword.\n \"\"\"\n element = self.keywords[\"VRHFIN\"].split(\":\")[0].strip()\n try:\n return Element(element).symbol\n except ValueError:\n # VASP incorrectly gives the element symbol for Xe as \"X\"\n # Some potentials, e.g., Zr_sv, gives the symbol as r.\n if element == \"X\":\n return \"Xe\"\n return Element(self.symbol.split(\"_\")[0]).symbol\n\n @property\n def atomic_no(self):\n \"\"\"\n Attempt to return the atomic number based on the VRHFIN keyword.\n \"\"\"\n return Element(self.element).Z\n\n @property\n def nelectrons(self):\n return self.zval\n\n @property\n def potential_type(self):\n if self.lultra:\n return \"US\"\n elif self.lpaw:\n return \"PAW\"\n else:\n return \"NC\"\n\n @property\n def functional(self):\n return self.functional_tags.get(self.LEXCH.lower(), {}).get('name')\n\n @property\n def functional_class(self):\n return self.functional_tags.get(self.LEXCH.lower(), {}).get('class')\n\n def get_potcar_hash(self):\n hash_str = \"\"\n for k, v in self.PSCTR.items():\n hash_str += \"{}\".format(k)\n if isinstance(v, int):\n hash_str += \"{}\".format(v)\n elif isinstance(v, float):\n hash_str += \"{:.3f}\".format(v)\n elif isinstance(v, bool):\n hash_str += \"{}\".format(bool)\n elif isinstance(v, (tuple, list)):\n for item in v:\n if isinstance(item, float):\n hash_str += \"{:.3f}\".format(item)\n elif isinstance(item, (Orbital, OrbitalDescription)):\n for item_v in item:\n if isinstance(item_v, (int, str)):\n hash_str += \"{}\".format(item_v)\n elif isinstance(item_v, float):\n hash_str += \"{:.3f}\".format(item_v)\n else:\n hash_str += \"{}\".format(item_v) if item_v else \"\"\n else:\n hash_str += v.replace(\" \", \"\")\n\n self.hash_str = hash_str\n return md5(hash_str.lower().encode('utf-8')).hexdigest()\n\n def __getattr__(self, a):\n \"\"\"\n Delegates attributes to keywords. For example, you can use\n potcarsingle.enmax to get the ENMAX of the POTCAR.\n\n For float type properties, they are converted to the correct float. By\n default, all energies in eV and all length scales are in Angstroms.\n \"\"\"\n\n try:\n return self.keywords[a.upper()]\n except:\n raise AttributeError(a)\n\n\nclass Potcar(list, MSONable):\n \"\"\"\n Object for reading and writing POTCAR files for calculations. Consists of a\n list of PotcarSingle.\n\n Args:\n symbols ([str]): Element symbols for POTCAR. This should correspond\n to the symbols used by VASP. E.g., \"Mg\", \"Fe_pv\", etc.\n functional (str): Functional used. To know what functional options\n there are, use Potcar.FUNCTIONAL_CHOICES. Note that VASP has\n different versions of the same functional. By default, the old\n PBE functional is used. 
If you want the newer ones, use PBE_52 or\n PBE_54. Note that if you intend to compare your results with the\n Materials Project, you should use the default setting. You can also\n override the default by setting PMG_DEFAULT_FUNCTIONAL in your\n .pmgrc.yaml.\n sym_potcar_map (dict): Allows a user to specify a specific element\n symbol to raw POTCAR mapping.\n \"\"\"\n\n FUNCTIONAL_CHOICES = list(PotcarSingle.functional_dir.keys())\n\n def __init__(self, symbols=None, functional=None, sym_potcar_map=None):\n if functional is None:\n functional = SETTINGS.get(\"PMG_DEFAULT_FUNCTIONAL\", \"PBE\")\n super(Potcar, self).__init__()\n self.functional = functional\n if symbols is not None:\n self.set_symbols(symbols, functional, sym_potcar_map)\n\n def as_dict(self):\n return {\"functional\": self.functional, \"symbols\": self.symbols,\n \"@module\": self.__class__.__module__,\n \"@class\": self.__class__.__name__}\n\n @classmethod\n def from_dict(cls, d):\n return Potcar(symbols=d[\"symbols\"], functional=d[\"functional\"])\n\n @staticmethod\n def from_file(filename):\n try:\n with zopen(filename, \"rt\") as f:\n fdata = f.read()\n except UnicodeDecodeError:\n warnings.warn(\"POTCAR contains invalid unicode errors. \"\n \"We will attempt to read it by ignoring errors.\")\n import codecs\n with codecs.open(filename, \"r\", encoding=\"utf-8\",\n errors=\"ignore\") as f:\n fdata = f.read()\n\n potcar = Potcar()\n potcar_strings = re.compile(r\"\\n?(\\s*.*?End of Dataset)\",\n re.S).findall(fdata)\n functionals = []\n for p in potcar_strings:\n single = PotcarSingle(p)\n potcar.append(single)\n functionals.append(single.functional)\n if len(set(functionals)) != 1:\n raise ValueError(\"File contains incompatible functionals!\")\n else:\n potcar.functional = functionals[0]\n return potcar\n\n def __str__(self):\n return \"\\n\".join([str(potcar).strip(\"\\n\") for potcar in self]) + \"\\n\"\n\n def write_file(self, filename):\n \"\"\"\n Write Potcar to a file.\n\n Args:\n filename (str): filename to write to.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(self.__str__())\n\n @property\n def symbols(self):\n \"\"\"\n Get the atomic symbols of all the atoms in the POTCAR file.\n \"\"\"\n return [p.symbol for p in self]\n\n @symbols.setter\n def symbols(self, symbols):\n self.set_symbols(symbols, functional=self.functional)\n\n @property\n def spec(self):\n \"\"\"\n Get the atomic symbols and hash of all the atoms in the POTCAR file.\n \"\"\"\n return [{\"symbol\": p.symbol, \"hash\": p.get_potcar_hash()} for p in self]\n\n def set_symbols(self, symbols, functional=None,\n sym_potcar_map=None):\n \"\"\"\n Initialize the POTCAR from a set of symbols. Currently, the POTCARs can\n be fetched from a location specified in .pmgrc.yaml. Use pmg config\n to add this setting.\n\n Args:\n symbols ([str]): A list of element symbols\n functional (str): The functional to use. If None, the setting\n PMG_DEFAULT_FUNCTIONAL in .pmgrc.yaml is used, or if this is\n not set, it will default to PBE.\n sym_potcar_map (dict): A map of symbol:raw POTCAR string. 
If\n sym_potcar_map is specified, POTCARs will be generated from\n the given map data rather than the config file location.\n \"\"\"\n del self[:]\n if sym_potcar_map:\n for el in symbols:\n self.append(PotcarSingle(sym_potcar_map[el]))\n else:\n for el in symbols:\n p = PotcarSingle.from_symbol_and_functional(el, functional)\n self.append(p)\n\n\nclass VaspInput(dict, MSONable):\n \"\"\"\n Class to contain a set of vasp input objects corresponding to a run.\n\n Args:\n incar: Incar object.\n kpoints: Kpoints object.\n poscar: Poscar object.\n potcar: Potcar object.\n optional_files: Other input files supplied as a dict of {\n filename: object}. The object should follow standard pymatgen\n conventions in implementing a as_dict() and from_dict method.\n \"\"\"\n\n def __init__(self, incar, kpoints, poscar, potcar, optional_files=None,\n **kwargs):\n super(VaspInput, self).__init__(**kwargs)\n self.update({'INCAR': incar,\n 'KPOINTS': kpoints,\n 'POSCAR': poscar,\n 'POTCAR': potcar})\n if optional_files is not None:\n self.update(optional_files)\n\n def __str__(self):\n output = []\n for k, v in self.items():\n output.append(k)\n output.append(str(v))\n output.append(\"\")\n return \"\\n\".join(output)\n\n def as_dict(self):\n d = {k: v.as_dict() for k, v in self.items()}\n d[\"@module\"] = self.__class__.__module__\n d[\"@class\"] = self.__class__.__name__\n return d\n\n @classmethod\n def from_dict(cls, d):\n dec = MontyDecoder()\n sub_d = {\"optional_files\": {}}\n for k, v in d.items():\n if k in [\"INCAR\", \"POSCAR\", \"POTCAR\", \"KPOINTS\"]:\n sub_d[k.lower()] = dec.process_decoded(v)\n elif k not in [\"@module\", \"@class\"]:\n sub_d[\"optional_files\"][k] = dec.process_decoded(v)\n return cls(**sub_d)\n\n def write_input(self, output_dir=\".\", make_dir_if_not_present=True):\n \"\"\"\n Write VASP input to a directory.\n\n Args:\n output_dir (str): Directory to write to. Defaults to current\n directory (\".\").\n make_dir_if_not_present (bool): Create the directory if not\n present. Defaults to True.\n \"\"\"\n if make_dir_if_not_present and not os.path.exists(output_dir):\n os.makedirs(output_dir)\n for k, v in self.items():\n with zopen(os.path.join(output_dir, k), \"wt\") as f:\n f.write(v.__str__())\n\n @staticmethod\n def from_directory(input_dir, optional_files=None):\n \"\"\"\n Read in a set of VASP input from a directory. Note that only the\n standard INCAR, POSCAR, POTCAR and KPOINTS files are read unless\n optional_filenames is specified.\n\n Args:\n input_dir (str): Directory to read VASP input from.\n optional_files (dict): Optional files to read in as well as a\n dict of {filename: Object type}. Object type must have a\n static method from_file.\n \"\"\"\n sub_d = {}\n for fname, ftype in [(\"INCAR\", Incar), (\"KPOINTS\", Kpoints),\n (\"POSCAR\", Poscar), (\"POTCAR\", Potcar)]:\n fullzpath = zpath(os.path.join(input_dir, fname))\n sub_d[fname.lower()] = ftype.from_file(fullzpath)\n sub_d[\"optional_files\"] = {}\n if optional_files is not None:\n for fname, ftype in optional_files.items():\n sub_d[\"optional_files\"][fname] = \\\n ftype.from_file(os.path.join(input_dir, fname))\n return VaspInput(**sub_d)\n\n def run_vasp(self, run_dir: PathLike = \".\",\n vasp_cmd: list = None,\n output_file: PathLike = \"vasp.out\",\n err_file: PathLike = \"vasp.err\"):\n \"\"\"\n Write input files and run VASP.\n\n :param run_dir: Where to write input files and do the run.\n :param vasp_cmd: Args to be supplied to run VASP. 
Otherwise, the\n PMG_VASP_EXE in .pmgrc.yaml is used.\n :param output_file: File to write output.\n :param err_file: File to write err.\n \"\"\"\n self.write_input(output_dir=run_dir)\n vasp_cmd = vasp_cmd or SETTINGS.get(\"PMG_VASP_EXE\")\n vasp_cmd = [os.path.expanduser(os.path.expandvars(t)) for t in vasp_cmd]\n if not vasp_cmd:\n raise RuntimeError(\"You need to supply vasp_cmd or set the PMG_VASP_EXE in .pmgrc.yaml to run VASP.\")\n with cd(run_dir):\n with open(output_file, 'w') as f_std, \\\n open(err_file, \"w\", buffering=1) as f_err:\n subprocess.check_call(vasp_cmd, stdout=f_std, stderr=f_err)\n\n",
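A minimal usage sketch, not part of the dataset row above: it assumes a directory "./my_run" (hypothetical path) already containing INCAR, KPOINTS, POSCAR and POTCAR, and exercises the VaspInput round-trip methods defined in that file. The import path assumes the file is pymatgen's pymatgen/io/vasp/inputs.py.

# Illustrative sketch, not part of the dataset row.
from pymatgen.io.vasp.inputs import VaspInput  # assumed module path

vasp_input = VaspInput.from_directory("./my_run")   # reads the four standard files
print(vasp_input["INCAR"])                          # dict-style access per file

# MSONable round-trip, then write the set to a fresh directory.
d = vasp_input.as_dict()
VaspInput.from_dict(d).write_input(output_dir="./my_run_copy",
                                   make_dir_if_not_present=True)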
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\n\"\"\"\nThis module provides classes and methods used to describe deformations and\nstrains, including applying those deformations to structure objects and\ngenerating deformed structure sets for further calculations.\n\"\"\"\n\nimport numpy as np\nimport scipy\nimport itertools\n\nimport collections\nfrom monty.dev import deprecated\n\nfrom pymatgen.core.lattice import Lattice\nfrom pymatgen.core.tensors import SquareTensor, symmetry_reduce\n\n__author__ = \"Joseph Montoya\"\n__copyright__ = \"Copyright 2012, The Materials Project\"\n__credits__ = \"Maarten de Jong, Mark Asta, Anubhav Jain\"\n__version__ = \"1.0\"\n__maintainer__ = \"Joseph Montoya\"\n__email__ = \"[email protected]\"\n__status__ = \"Production\"\n__date__ = \"July 24, 2018\"\n\n\nclass Deformation(SquareTensor):\n \"\"\"\n Subclass of SquareTensor that describes the deformation gradient tensor\n \"\"\"\n symbol = \"d\"\n\n def __new__(cls, deformation_gradient):\n \"\"\"\n Create a Deformation object. Note that the constructor uses __new__\n rather than __init__ according to the standard method of subclassing\n numpy ndarrays.\n\n Args:\n deformation_gradient (3x3 array-like): the 3x3 array-like\n representing the deformation gradient\n \"\"\"\n obj = super(Deformation, cls).__new__(cls, deformation_gradient)\n return obj.view(cls)\n\n def is_independent(self, tol=1e-8):\n \"\"\"\n checks to determine whether the deformation is independent\n \"\"\"\n return len(self.get_perturbed_indices(tol)) == 1\n\n def get_perturbed_indices(self, tol=1e-8):\n \"\"\"\n Gets indices of perturbed elements of the deformation gradient,\n i. e. those that differ from the identity\n \"\"\"\n indices = list(zip(*np.where(abs(self - np.eye(3)) > tol)))\n return indices\n\n @property\n def green_lagrange_strain(self):\n \"\"\"\n calculates the euler-lagrange strain from\n the deformation gradient\n \"\"\"\n return Strain.from_deformation(self)\n\n def apply_to_structure(self, structure):\n \"\"\"\n Apply the deformation gradient to a structure.\n\n Args:\n structure (Structure object): the structure object to\n be modified by the deformation\n \"\"\"\n def_struct = structure.copy()\n old_latt = def_struct.lattice.matrix\n new_latt = np.transpose(np.dot(self, np.transpose(old_latt)))\n def_struct.lattice = Lattice(new_latt)\n return def_struct\n\n @classmethod\n def from_index_amount(cls, matrixpos, amt):\n \"\"\"\n Factory method for constructing a Deformation object\n from a matrix position and amount\n\n Args:\n matrixpos (tuple): tuple corresponding the matrix position to\n have a perturbation added\n amt (float): amount to add to the identity matrix at position\n matrixpos\n \"\"\"\n f = np.identity(3)\n f[matrixpos] += amt\n return cls(f)\n\n\nclass DeformedStructureSet(collections.abc.Sequence):\n \"\"\"\n class that generates a set of independently deformed structures that\n can be used to calculate linear stress-strain response\n \"\"\"\n\n def __init__(self, structure, norm_strains=None, shear_strains=None,\n symmetry=False):\n \"\"\"\n constructs the deformed geometries of a structure. 
Generates\n m + n deformed structures according to the supplied parameters.\n\n Args:\n structure (Structure): structure to undergo deformation\n norm_strains (list of floats): strain values to apply\n to each normal mode.\n shear_strains (list of floats): strain values to apply\n to each shear mode.\n symmetry (bool): whether or not to use symmetry reduction.\n \"\"\"\n norm_strains = norm_strains or [-0.01, -0.005, 0.005, 0.01]\n shear_strains = shear_strains or [-0.06, -0.03, 0.03, 0.06]\n\n self.undeformed_structure = structure\n self.deformations = []\n self.def_structs = []\n\n # Generate deformations\n for ind in [(0, 0), (1, 1), (2, 2)]:\n for amount in norm_strains:\n strain = Strain.from_index_amount(ind, amount)\n self.deformations.append(strain.get_deformation_matrix())\n\n for ind in [(0, 1), (0, 2), (1, 2)]:\n for amount in shear_strains:\n strain = Strain.from_index_amount(ind, amount)\n self.deformations.append(strain.get_deformation_matrix())\n\n # Perform symmetry reduction if specified\n if symmetry:\n self.sym_dict = symmetry_reduce(self.deformations, structure)\n self.deformations = list(self.sym_dict.keys())\n self.deformed_structures = [defo.apply_to_structure(structure)\n for defo in self.deformations]\n\n def __iter__(self):\n return iter(self.deformed_structures)\n\n def __len__(self):\n return len(self.deformed_structures)\n\n def __getitem__(self, ind):\n return self.deformed_structures[ind]\n\n\nclass Strain(SquareTensor):\n \"\"\"\n Subclass of SquareTensor that describes the Green-Lagrange strain tensor.\n \"\"\"\n symbol = \"e\"\n\n def __new__(cls, strain_matrix):\n \"\"\"\n Create a Strain object. Note that the constructor uses __new__\n rather than __init__ according to the standard method of\n subclassing numpy ndarrays. Note also that the default constructor\n does not include the deformation gradient\n\n Args:\n strain_matrix (3x3 array-like): the 3x3 array-like\n representing the Green-Lagrange strain\n \"\"\"\n vscale = np.ones((6,))\n vscale[3:] *= 2\n obj = super(Strain, cls).__new__(cls, strain_matrix, vscale=vscale)\n if not obj.is_symmetric():\n raise ValueError(\"Strain objects must be initialized \"\n \"with a symmetric array or a voigt-notation \"\n \"vector with six entries.\")\n return obj.view(cls)\n\n def __array_finalize__(self, obj):\n if obj is None:\n return\n self.rank = getattr(obj, \"rank\", None)\n self._vscale = getattr(obj, \"_vscale\", None)\n\n @classmethod\n def from_deformation(cls, deformation):\n \"\"\"\n Factory method that returns a Strain object from a deformation\n gradient\n\n Args:\n deformation (3x3 array-like):\n \"\"\"\n dfm = Deformation(deformation)\n return cls(0.5 * (np.dot(dfm.trans, dfm) - np.eye(3)))\n\n @classmethod\n def from_index_amount(cls, idx, amount):\n \"\"\"\n Like Deformation.from_index_amount, except generates\n a strain from the zero 3x3 tensor or voigt vector with\n the amount specified in the index location. 
Ensures\n symmetric strain.\n\n Args:\n idx (tuple or integer): index to be perturbed, can be voigt or\n full-tensor notation\n amount (float): amount to perturb selected index\n \"\"\"\n if np.array(idx).ndim == 0:\n v = np.zeros(6)\n v[idx] = amount\n return cls.from_voigt(v)\n elif np.array(idx).ndim == 1:\n v = np.zeros((3, 3))\n for i in itertools.permutations(idx):\n v[i] = amount\n return cls(v)\n else:\n raise ValueError(\"Index must either be 2-tuple or integer \"\n \"corresponding to full-tensor or voigt index\")\n\n @property\n @deprecated(message=\"the deformation_matrix property is deprecated, and \"\n \"will be removed in pymatgen v2019.1.1, please use the \"\n \"get_deformation_matrix method instead.\")\n def deformation_matrix(self):\n return self.get_deformation_matrix()\n\n def get_deformation_matrix(self, shape=\"upper\"):\n \"\"\"\n returns the deformation matrix\n \"\"\"\n return convert_strain_to_deformation(self, shape=shape)\n\n @property\n def von_mises_strain(self):\n \"\"\"\n Equivalent strain to Von Mises Stress\n \"\"\"\n eps = self - 1/3 * np.trace(self) * np.identity(3)\n\n return np.sqrt(np.sum(eps * eps) * 2/3)\n\n\ndef convert_strain_to_deformation(strain, shape=\"upper\"):\n \"\"\"\n This function converts a strain to a deformation gradient that will\n produce that strain. Supports three methods:\n\n Args:\n strain (3x3 array-like): strain matrix\n shape: (string): method for determining deformation, supports\n \"upper\" produces an upper triangular defo\n \"lower\" produces a lower triangular defo\n \"symmetric\" produces a symmetric defo\n \"\"\"\n strain = SquareTensor(strain)\n ftdotf = 2*strain + np.eye(3)\n if shape == \"upper\":\n result = scipy.linalg.cholesky(ftdotf)\n elif shape == \"symmetric\":\n result = scipy.linalg.sqrtm(ftdotf)\n else:\n raise ValueError(\"shape must be \\\"upper\\\" or \\\"symmetric\\\"\")\n return Deformation(result)\n",
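A quick numeric check, not part of the dataset row: it verifies with plain numpy the relations the module above implements, namely the Green-Lagrange strain E = 0.5 * (F^T F - I) of a deformation gradient F, and its inversion in convert_strain_to_deformation via a Cholesky ("upper") or matrix-square-root ("symmetric") factorization of F^T F = 2E + I.

# Illustrative check, not part of the dataset row.
import numpy as np
import scipy.linalg

# Small shear perturbation of the identity, as Deformation.from_index_amount builds.
F = np.identity(3)
F[0, 1] += 0.01

# Green-Lagrange strain of F.
E = 0.5 * (np.dot(F.T, F) - np.eye(3))

# Invert: factor F^T F = 2E + I two ways; both must reproduce E.
ftf = 2 * E + np.eye(3)
for G in (scipy.linalg.cholesky(ftf),   # upper triangular, the "upper" shape
          scipy.linalg.sqrtm(ftf)):     # symmetric, the "symmetric" shape
    assert np.allclose(0.5 * (np.dot(G.T, G) - np.eye(3)), E)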
"# coding: utf-8\n# Copyright (c) Pymatgen Development Team.\n# Distributed under the terms of the MIT License.\n\n\nimport re\nimport warnings\nfrom operator import itemgetter\n\nfrom tabulate import tabulate\n\nimport numpy as np\n\nfrom monty.io import zopen\nfrom monty.json import MSONable\n\nfrom pymatgen import Structure, Lattice, Element, Molecule\nfrom pymatgen.io.cif import CifParser\nfrom pymatgen.symmetry.analyzer import SpacegroupAnalyzer\nfrom pymatgen.util.io_utils import clean_lines\nfrom pymatgen.util.string import str_delimited\n\n\n\"\"\"\nThis module defines classes for reading/manipulating/writing the main sections\nof FEFF input file(feff.inp), namely HEADER, ATOMS, POTENTIAL and the program\ncontrol tags.\n\nXANES and EXAFS input files, are available, for non-spin case at this time.\n\"\"\"\n\n__author__ = \"Alan Dozier, Kiran Mathew\"\n__credits__ = \"Anubhav Jain, Shyue Ping Ong\"\n__copyright__ = \"Copyright 2011, The Materials Project\"\n__version__ = \"1.0.3\"\n__maintainer__ = \"Alan Dozier\"\n__email__ = \"[email protected]\"\n__status__ = \"Beta\"\n__date__ = \"April 7, 2013\"\n\n# **Non-exhaustive** list of valid Feff.inp tags\nVALID_FEFF_TAGS = (\"CONTROL\", \"PRINT\", \"ATOMS\", \"POTENTIALS\", \"RECIPROCAL\",\n \"REAL\", \"MARKER\", \"LATTICE\", \"TITLE\", \"RMULTIPLIER\",\n \"SGROUP\", \"COORDINATES\", \"EQUIVALENCE\", \"CIF\", \"CGRID\",\n \"CFAVERAGE\", \"OVERLAP\", \"EXAFS\", \"XANES\", \"ELNES\", \"EXELFS\",\n \"LDOS\", \"ELLIPTICITY\", \"MULTIPOLE\", \"POLARIZATION\",\n \"RHOZZP\", \"DANES\", \"FPRIME\", \"NRIXS\", \"XES\", \"XNCD\",\n \"XMCD\", \"XNCDCONTROL\", \"END\", \"KMESH\", \"PRINT\", \"EGRID\",\n \"DIMS\", \"AFOLP\", \"EDGE\", \"COMPTON\", \"DANES\",\n \"FPRIME\" \"MDFF\", \"HOLE\", \"COREHOLE\", \"S02\", \"CHBROAD\",\n \"EXCHANGE\", \"FOLP\", \"NOHOLE\", \"RGRID\", \"SCF\",\n \"UNFREEZEF\", \"CHSHIFT\", \"DEBYE\",\n \"INTERSTITIAL\", \"CHWIDTH\", \"EGAP\", \"EPS0\", \"EXTPOT\",\n \"ION\", \"JUMPRM\", \"EXPOT\", \"SPIN\", \"LJMAX\", \"LDEC\", \"MPSE\",\n \"PLASMON\", \"RPHASES\", \"RSIGMA\", \"PMBSE\", \"TDLDA\", \"FMS\",\n \"DEBYA\", \"OPCONS\", \"PREP\", \"RESTART\", \"SCREEN\", \"SETE\",\n \"STRFACTORS\", \"BANDSTRUCTURE\", \"RPATH\", \"NLEG\", \"PCRITERIA\",\n \"SYMMETRY\", \"SS\", \"CRITERIA\", \"IORDER\", \"NSTAR\", \"ABSOLUTE\",\n \"CORRECTIONS\", \"SIG2\", \"SIG3\", \"MBCONV\", \"SFCONV\", \"RCONV\",\n \"SELF\", \"SFSE\", \"MAGIC\", \"TARGET\", \"STRFAC\")\n\n\nclass Header(MSONable):\n \"\"\"\n Creates Header for the FEFF input file.\n\n Has the following format::\n\n * This feff.inp file generated by pymatgen, www.materialsproject.org\n TITLE comment:\n TITLE Source: CoO19128.cif\n TITLE Structure Summary: (Co2 O2)\n TITLE Reduced formula: CoO\n TITLE space group: P1, space number: 1\n TITLE abc: 3.297078 3.297078 5.254213\n TITLE angles: 90.0 90.0 120.0\n TITLE sites: 4\n * 1 Co 0.666666 0.333332 0.496324\n * 2 Co 0.333333 0.666667 0.996324\n * 3 O 0.666666 0.333332 0.878676\n * 4 O 0.333333 0.666667 0.378675\n\n Args:\n struct: Structure object, See pymatgen.core.structure.Structure.\n source: User supplied identifier, i.e. 
for Materials Project this\n would be the material ID number\n comment: Comment for first header line\n \"\"\"\n\n def __init__(self, struct, source='', comment=''):\n if struct.is_ordered:\n self.struct = struct\n self.source = source\n sym = SpacegroupAnalyzer(struct)\n data = sym.get_symmetry_dataset()\n self.space_number = data[\"number\"]\n self.space_group = data[\"international\"]\n self.comment = comment or \"None given\"\n else:\n raise ValueError(\"Structure with partial occupancies cannot be \"\n \"converted into atomic coordinates!\")\n\n @staticmethod\n def from_cif_file(cif_file, source='', comment=''):\n \"\"\"\n Static method to create Header object from cif_file\n\n Args:\n cif_file: cif_file path and name\n source: User supplied identifier, i.e. for Materials Project this\n would be the material ID number\n comment: User comment that goes in header\n\n Returns:\n Header Object\n \"\"\"\n r = CifParser(cif_file)\n structure = r.get_structures()[0]\n return Header(structure, source, comment)\n\n @property\n def structure_symmetry(self):\n \"\"\"\n Returns space number and space group\n\n Returns:\n Space number and space group list\n \"\"\"\n return self.space_group, self.space_number\n\n @property\n def formula(self):\n \"\"\"\n Formula of structure\n \"\"\"\n return self.struct.composition.formula\n\n @staticmethod\n def from_file(filename):\n \"\"\"\n Returns Header object from file\n \"\"\"\n hs = Header.header_string_from_file(filename)\n return Header.from_string(hs)\n\n @staticmethod\n def header_string_from_file(filename='feff.inp'):\n \"\"\"\n Reads Header string from either a HEADER file or feff.inp file\n Will also read a header from a non-pymatgen generated feff.inp file\n\n Args:\n filename: File name containing the Header data.\n\n Returns:\n Reads header string.\n \"\"\"\n with zopen(filename, \"r\") as fobject:\n f = fobject.readlines()\n feff_header_str = []\n ln = 0\n\n # Checks to see if generated by pymatgen\n try:\n feffpmg = f[0].find(\"pymatgen\")\n except IndexError:\n feffpmg = False\n\n # Reads pymatgen generated header or feff.inp file\n if feffpmg:\n nsites = int(f[8].split()[2])\n for line in f:\n ln += 1\n if ln <= nsites + 9:\n feff_header_str.append(line)\n else:\n # Reads header from header from feff.inp file from unknown\n # source\n end = 0\n for line in f:\n if (line[0] == \"*\" or line[0] == \"T\") and end == 0:\n feff_header_str.append(line.replace(\"\\r\", \"\"))\n else:\n end = 1\n\n return ''.join(feff_header_str)\n\n @staticmethod\n def from_string(header_str):\n \"\"\"\n Reads Header string and returns Header object if header was\n generated by pymatgen.\n Note: Checks to see if generated by pymatgen, if not it is impossible\n to generate structure object so it is not possible to generate\n header object and routine ends\n\n Args:\n header_str: pymatgen generated feff.inp header\n\n Returns:\n Structure object.\n \"\"\"\n lines = tuple(clean_lines(header_str.split(\"\\n\"), False))\n comment1 = lines[0]\n feffpmg = comment1.find(\"pymatgen\")\n\n if feffpmg:\n comment2 = ' '.join(lines[1].split()[2:])\n\n source = ' '.join(lines[2].split()[2:])\n basis_vec = lines[6].split(\":\")[-1].split()\n # a, b, c\n a = float(basis_vec[0])\n b = float(basis_vec[1])\n c = float(basis_vec[2])\n lengths = [a, b, c]\n # alpha, beta, gamma\n basis_ang = lines[7].split(\":\")[-1].split()\n alpha = float(basis_ang[0])\n beta = float(basis_ang[1])\n gamma = float(basis_ang[2])\n angles = [alpha, beta, gamma]\n\n lattice = 
Lattice.from_lengths_and_angles(lengths, angles)\n\n natoms = int(lines[8].split(\":\")[-1].split()[0])\n\n atomic_symbols = []\n for i in range(9, 9 + natoms):\n atomic_symbols.append(lines[i].split()[2])\n\n # read the atomic coordinates\n coords = []\n for i in range(natoms):\n toks = lines[i + 9].split()\n coords.append([float(s) for s in toks[3:]])\n\n struct = Structure(lattice, atomic_symbols, coords, False,\n False, False)\n\n h = Header(struct, source, comment2)\n\n return h\n else:\n return \"Header not generated by pymatgen, cannot return header object\"\n\n def __str__(self):\n \"\"\"\n String representation of Header.\n \"\"\"\n to_s = lambda x: \"%0.6f\" % x\n output = [\"* This FEFF.inp file generated by pymatgen\",\n ''.join([\"TITLE comment: \", self.comment]),\n ''.join([\"TITLE Source: \", self.source]),\n \"TITLE Structure Summary: {}\"\n .format(self.struct.composition.formula),\n \"TITLE Reduced formula: {}\"\n .format(self.struct.composition.reduced_formula),\n \"TITLE space group: ({}), space number: ({})\"\n .format(self.space_group, self.space_number),\n \"TITLE abc:{}\".format(\" \".join(\n [to_s(i).rjust(10) for i in self.struct.lattice.abc])),\n \"TITLE angles:{}\".format(\" \".join(\n [to_s(i).rjust(10) for i in self.struct.lattice.angles])),\n \"TITLE sites: {}\".format(self.struct.num_sites)]\n for i, site in enumerate(self.struct):\n output.append(\" \".join([\"*\", str(i + 1), site.species_string,\n \" \".join([to_s(j).rjust(12)\n for j in site.frac_coords])]))\n return \"\\n\".join(output)\n\n def write_file(self, filename='HEADER'):\n \"\"\"\n Writes Header into filename on disk.\n\n Args:\n filename: Filename and path for file to be written to disk\n \"\"\"\n with open(filename, \"w\") as f:\n f.write(str(self) + \"\\n\")\n\n\nclass Atoms(MSONable):\n \"\"\"\n Atomic cluster centered around the absorbing atom.\n \"\"\"\n\n def __init__(self, struct, absorbing_atom, radius):\n \"\"\"\n Args:\n struct (Structure): input structure\n absorbing_atom (str/int): Symbol for absorbing atom or site index\n radius (float): radius of the atom cluster in Angstroms.\n \"\"\"\n if struct.is_ordered:\n self.struct = struct\n self.pot_dict = get_atom_map(struct)\n else:\n raise ValueError(\"Structure with partial occupancies cannot be \"\n \"converted into atomic coordinates!\")\n\n self.absorbing_atom, self.center_index = \\\n get_absorbing_atom_symbol_index(absorbing_atom, struct)\n self.radius = radius\n self._cluster = self._set_cluster()\n\n def _set_cluster(self):\n \"\"\"\n Compute and set the cluster of atoms as a Molecule object. 
The siteato\n coordinates are translated such that the absorbing atom(aka central\n atom) is at the origin.\n\n Returns:\n Molecule\n \"\"\"\n center = self.struct[self.center_index].coords\n sphere = self.struct.get_neighbors(self.struct[self.center_index], self.radius)\n\n symbols = [self.absorbing_atom]\n coords = [[0, 0, 0]]\n for i, site_dist in enumerate(sphere):\n site_symbol = re.sub(r\"[^aA-zZ]+\", \"\", site_dist[0].species_string)\n symbols.append(site_symbol)\n coords.append(site_dist[0].coords - center)\n return Molecule(symbols, coords)\n\n @property\n def cluster(self):\n \"\"\"\n Returns the atomic cluster as a Molecule object.\n \"\"\"\n return self._cluster\n\n @staticmethod\n def atoms_string_from_file(filename):\n \"\"\"\n Reads atomic shells from file such as feff.inp or ATOMS file\n The lines are arranged as follows:\n\n x y z ipot Atom Symbol Distance Number\n\n with distance being the shell radius and ipot an integer identifying\n the potential used.\n\n Args:\n filename: File name containing atomic coord data.\n\n Returns:\n Atoms string.\n \"\"\"\n with zopen(filename, \"rt\") as fobject:\n f = fobject.readlines()\n coords = 0\n atoms_str = []\n\n for line in f:\n if coords == 0:\n find_atoms = line.find(\"ATOMS\")\n if find_atoms >= 0:\n coords = 1\n if coords == 1 and not (\"END\" in line):\n atoms_str.append(line.replace(\"\\r\", \"\"))\n\n return ''.join(atoms_str)\n\n @staticmethod\n def cluster_from_file(filename):\n \"\"\"\n Parse the feff input file and return the atomic cluster as a Molecule\n object.\n\n Args:\n filename (str): path the feff input file\n\n Returns:\n Molecule: the atomic cluster as Molecule object. The absorbing atom\n is the one at the origin.\n \"\"\"\n atoms_string = Atoms.atoms_string_from_file(filename)\n line_list = [l.split() for l in atoms_string.splitlines()[3:]]\n coords = []\n symbols = []\n for l in line_list:\n if l:\n coords.append([float(i) for i in l[:3]])\n symbols.append(l[4])\n return Molecule(symbols, coords)\n\n def get_lines(self):\n \"\"\"\n Returns a list of string representations of the atomic configuration\n information(x, y, z, ipot, atom_symbol, distance, id).\n\n Returns:\n list: list of strings, sorted by the distance from the absorbing\n atom.\n \"\"\"\n lines = [[\"{:f}\".format(self._cluster[0].x),\n \"{:f}\".format(self._cluster[0].y),\n \"{:f}\".format(self._cluster[0].z),\n 0, self.absorbing_atom, \"0.0\", 0]]\n for i, site in enumerate(self._cluster[1:]):\n site_symbol = re.sub(r\"[^aA-zZ]+\", \"\", site.species_string)\n ipot = self.pot_dict[site_symbol]\n lines.append([\"{:f}\".format(site.x), \"{:f}\".format(site.y),\n \"{:f}\".format(site.z), ipot, site_symbol,\n \"{:f}\".format(self._cluster.get_distance(0, i + 1)), i + 1])\n\n return sorted(lines, key=itemgetter(5))\n\n def __str__(self):\n \"\"\"\n String representation of Atoms file.\n \"\"\"\n lines_sorted = self.get_lines()\n # TODO: remove the formatting and update the unittests\n lines_formatted = str(tabulate(lines_sorted,\n headers=[\"* x\", \"y\", \"z\", \"ipot\",\n \"Atom\", \"Distance\", \"Number\"]))\n atom_list = lines_formatted.replace(\"--\", \"**\")\n return ''.join([\"ATOMS\\n\", atom_list, \"\\nEND\\n\"])\n\n def write_file(self, filename='ATOMS'):\n \"\"\"\n Write Atoms list to file.\n\n Args:\n filename: path for file to be written\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(str(self) + \"\\n\")\n\n\nclass Tags(dict):\n \"\"\"\n FEFF control parameters.\n \"\"\"\n\n def __init__(self, params=None):\n \"\"\"\n 
Args:\n params: A set of input parameters as a dictionary.\n \"\"\"\n super(Tags, self).__init__()\n if params:\n self.update(params)\n\n def __setitem__(self, key, val):\n \"\"\"\n Add parameter-val pair. Warns if parameter is not in list of valid\n Feff tags. Also cleans the parameter and val by stripping leading and\n trailing white spaces.\n\n Arg:\n key: dict key value\n value: value associated with key in dictionary\n \"\"\"\n if key.strip().upper() not in VALID_FEFF_TAGS:\n warnings.warn(key.strip() + \" not in VALID_FEFF_TAGS list\")\n super(Tags, self).__setitem__(key.strip(),\n Tags.proc_val(key.strip(), val.strip())\n if isinstance(val, str) else val)\n\n def as_dict(self):\n \"\"\"\n Dict representation.\n\n Returns:\n Dictionary of parameters from fefftags object\n \"\"\"\n tags_dict = dict(self)\n tags_dict['@module'] = self.__class__.__module__\n tags_dict['@class'] = self.__class__.__name__\n return tags_dict\n\n @staticmethod\n def from_dict(d):\n \"\"\"\n Creates Tags object from a dictionary.\n\n Args:\n d: Dict of feff parameters and values.\n\n Returns:\n Tags object\n \"\"\"\n i = Tags()\n for k, v in d.items():\n if k not in (\"@module\", \"@class\"):\n i[k] = v\n return i\n\n def get_string(self, sort_keys=False, pretty=False):\n \"\"\"\n Returns a string representation of the Tags. The reason why this\n method is different from the __str__ method is to provide options\n for pretty printing.\n\n Args:\n sort_keys: Set to True to sort the Feff parameters alphabetically.\n Defaults to False.\n pretty: Set to True for pretty aligned output. Defaults to False.\n\n Returns:\n String representation of Tags.\n \"\"\"\n keys = self.keys()\n if sort_keys:\n keys = sorted(keys)\n lines = []\n for k in keys:\n if isinstance(self[k], dict):\n if k in [\"ELNES\", \"EXELFS\"]:\n lines.append([k, self._stringify_val(self[k][\"ENERGY\"])])\n beam_energy = self._stringify_val(self[k][\"BEAM_ENERGY\"])\n beam_energy_list = beam_energy.split()\n if int(beam_energy_list[1]) == 0: # aver=0, specific beam direction\n lines.append([beam_energy])\n lines.append([self._stringify_val(self[k][\"BEAM_DIRECTION\"])])\n else:\n # no cross terms for orientation averaged spectrum\n beam_energy_list[2] = str(0)\n lines.append([self._stringify_val(beam_energy_list)])\n lines.append([self._stringify_val(self[k][\"ANGLES\"])])\n lines.append([self._stringify_val(self[k][\"MESH\"])])\n lines.append([self._stringify_val(self[k][\"POSITION\"])])\n else:\n lines.append([k, self._stringify_val(self[k])])\n if pretty:\n return tabulate(lines)\n else:\n return str_delimited(lines, None, \" \")\n\n @staticmethod\n def _stringify_val(val):\n \"\"\"\n Convert the given value to string.\n \"\"\"\n if isinstance(val, list):\n return \" \".join([str(i) for i in val])\n else:\n return str(val)\n\n def __str__(self):\n return self.get_string()\n\n def write_file(self, filename='PARAMETERS'):\n \"\"\"\n Write Tags to a Feff parameter tag file.\n\n Args:\n filename: filename and path to write to.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(self.__str__() + \"\\n\")\n\n @staticmethod\n def from_file(filename=\"feff.inp\"):\n \"\"\"\n Creates a Feff_tag dictionary from a PARAMETER or feff.inp file.\n\n Args:\n filename: Filename for either PARAMETER or feff.inp file\n\n Returns:\n Feff_tag object\n \"\"\"\n with zopen(filename, \"rt\") as f:\n lines = list(clean_lines(f.readlines()))\n params = {}\n eels_params = []\n ieels = -1\n ieels_max = -1\n for i, line in enumerate(lines):\n m = 
re.match(r\"([A-Z]+\\d*\\d*)\\s*(.*)\", line)\n if m:\n key = m.group(1).strip()\n val = m.group(2).strip()\n val = Tags.proc_val(key, val)\n if key not in (\"ATOMS\", \"POTENTIALS\", \"END\", \"TITLE\"):\n if key in [\"ELNES\", \"EXELFS\"]:\n ieels = i\n ieels_max = ieels + 5\n else:\n params[key] = val\n if ieels >= 0:\n if i >= ieels and i <= ieels_max:\n if i == ieels + 1:\n if int(line.split()[1]) == 1:\n ieels_max -= 1\n eels_params.append(line)\n\n if eels_params:\n if len(eels_params) == 6:\n eels_keys = ['BEAM_ENERGY', 'BEAM_DIRECTION', 'ANGLES', 'MESH', 'POSITION']\n else:\n eels_keys = ['BEAM_ENERGY', 'ANGLES', 'MESH', 'POSITION']\n eels_dict = {\"ENERGY\": Tags._stringify_val(eels_params[0].split()[1:])}\n for k, v in zip(eels_keys, eels_params[1:]):\n eels_dict[k] = str(v)\n params[str(eels_params[0].split()[0])] = eels_dict\n\n return Tags(params)\n\n @staticmethod\n def proc_val(key, val):\n \"\"\"\n Static helper method to convert Feff parameters to proper types, e.g.\n integers, floats, lists, etc.\n\n Args:\n key: Feff parameter key\n val: Actual value of Feff parameter.\n \"\"\"\n\n list_type_keys = list(VALID_FEFF_TAGS)\n del list_type_keys[list_type_keys.index(\"ELNES\")]\n del list_type_keys[list_type_keys.index(\"EXELFS\")]\n boolean_type_keys = ()\n float_type_keys = (\"S02\", \"EXAFS\", \"RPATH\")\n\n def smart_int_or_float(numstr):\n if numstr.find(\".\") != -1 or numstr.lower().find(\"e\") != -1:\n return float(numstr)\n else:\n return int(numstr)\n\n try:\n if key.lower() == 'cif':\n m = re.search(r\"\\w+.cif\", val)\n return m.group(0)\n\n if key in list_type_keys:\n output = list()\n toks = re.split(r\"\\s+\", val)\n\n for tok in toks:\n m = re.match(r\"(\\d+)\\*([\\d\\.\\-\\+]+)\", tok)\n if m:\n output.extend([smart_int_or_float(m.group(2))] *\n int(m.group(1)))\n else:\n output.append(smart_int_or_float(tok))\n return output\n if key in boolean_type_keys:\n m = re.search(r\"^\\W+([TtFf])\", val)\n if m:\n if m.group(1) == \"T\" or m.group(1) == \"t\":\n return True\n else:\n return False\n raise ValueError(key + \" should be a boolean type!\")\n\n if key in float_type_keys:\n return float(val)\n\n except ValueError:\n return val.capitalize()\n\n return val.capitalize()\n\n def diff(self, other):\n \"\"\"\n Diff function. Compares two PARAMETER files and indicates which\n parameters are the same and which are not. 
Useful for checking whether\n two runs were done using the same parameters.\n\n Args:\n other: The other PARAMETER dictionary to compare to.\n\n Returns:\n Dict of the format {\"Same\" : parameters_that_are_the_same,\n \"Different\": parameters_that_are_different} Note that the\n parameters are return as full dictionaries of values.\n \"\"\"\n similar_param = {}\n different_param = {}\n for k1, v1 in self.items():\n if k1 not in other:\n different_param[k1] = {\"FEFF_TAGS1\": v1,\n \"FEFF_TAGS2\": \"Default\"}\n elif v1 != other[k1]:\n different_param[k1] = {\"FEFF_TAGS1\": v1,\n \"FEFF_TAGS2\": other[k1]}\n else:\n similar_param[k1] = v1\n for k2, v2 in other.items():\n if k2 not in similar_param and k2 not in different_param:\n if k2 not in self:\n different_param[k2] = {\"FEFF_TAGS1\": \"Default\",\n \"FEFF_TAGS2\": v2}\n return {\"Same\": similar_param, \"Different\": different_param}\n\n def __add__(self, other):\n \"\"\"\n Add all the values of another Tags object to this object\n Facilitates the use of \"standard\" Tags\n \"\"\"\n params = dict(self)\n for k, v in other.items():\n if k in self and v != self[k]:\n raise ValueError(\"Tags have conflicting values!\")\n else:\n params[k] = v\n return Tags(params)\n\n\nclass Potential(MSONable):\n \"\"\"\n FEFF atomic potential.\n \"\"\"\n\n def __init__(self, struct, absorbing_atom):\n \"\"\"\n Args:\n struct (Structure): Structure object.\n absorbing_atom (str/int): Absorbing atom symbol or site index\n \"\"\"\n if struct.is_ordered:\n self.struct = struct\n self.pot_dict = get_atom_map(struct)\n else:\n raise ValueError(\"Structure with partial occupancies cannot be \"\n \"converted into atomic coordinates!\")\n\n self.absorbing_atom, _ = \\\n get_absorbing_atom_symbol_index(absorbing_atom, struct)\n\n @staticmethod\n def pot_string_from_file(filename='feff.inp'):\n \"\"\"\n Reads Potential parameters from a feff.inp or FEFFPOT file.\n The lines are arranged as follows:\n\n ipot Z element lmax1 lmax2 stoichometry spinph\n\n Args:\n filename: file name containing potential data.\n\n Returns:\n FEFFPOT string.\n \"\"\"\n with zopen(filename, \"rt\") as f_object:\n f = f_object.readlines()\n ln = -1\n pot_str = [\"POTENTIALS\\n\"]\n pot_tag = -1\n pot_data = 0\n pot_data_over = 1\n\n sep_line_pattern = [re.compile('ipot.*Z.*tag.*lmax1.*lmax2.*spinph'),\n re.compile('^[*]+.*[*]+$')]\n\n for line in f:\n if pot_data_over == 1:\n ln += 1\n if pot_tag == -1:\n pot_tag = line.find(\"POTENTIALS\")\n ln = 0\n if pot_tag >= 0 and ln > 0 and pot_data_over > 0:\n try:\n if len(sep_line_pattern[0].findall(line)) > 0 or \\\n len(sep_line_pattern[1].findall(line)) > 0:\n pot_str.append(line)\n elif int(line.split()[0]) == pot_data:\n pot_data += 1\n pot_str.append(line.replace(\"\\r\", \"\"))\n except (ValueError, IndexError):\n if pot_data > 0:\n pot_data_over = 0\n\n return ''.join(pot_str).rstrip('\\n')\n\n @staticmethod\n def pot_dict_from_string(pot_data):\n \"\"\"\n Creates atomic symbol/potential number dictionary\n forward and reverse\n\n Arg:\n pot_data: potential data in string format\n\n Returns:\n forward and reverse atom symbol and potential number dictionaries.\n \"\"\"\n\n pot_dict = {}\n pot_dict_reverse = {}\n begin = 0\n ln = -1\n\n for line in pot_data.split(\"\\n\"):\n try:\n if begin == 0 and line.split()[0] == \"0\":\n begin += 1\n ln = 0\n if begin == 1:\n ln += 1\n if ln > 0:\n atom = line.split()[2]\n index = int(line.split()[0])\n pot_dict[atom] = index\n pot_dict_reverse[index] = atom\n except (ValueError, IndexError):\n 
pass\n return pot_dict, pot_dict_reverse\n\n def __str__(self):\n \"\"\"\n Returns a string representation of potential parameters to be used in\n the feff.inp file,\n determined from structure object.\n\n The lines are arranged as follows:\n\n ipot Z element lmax1 lmax2 stoichiometry spinph\n\n Returns:\n String representation of Atomic Coordinate Shells.\n \"\"\"\n central_element = Element(self.absorbing_atom)\n ipotrow = [[0, central_element.Z, central_element.symbol, -1, -1, .0001, 0]]\n for el, amt in self.struct.composition.items():\n ipot = self.pot_dict[el.symbol]\n ipotrow.append([ipot, el.Z, el.symbol, -1, -1, amt, 0])\n ipot_sorted = sorted(ipotrow, key=itemgetter(0))\n ipotrow = str(tabulate(ipot_sorted,\n headers=[\"*ipot\", \"Z\", \"tag\", \"lmax1\",\n \"lmax2\", \"xnatph(stoichometry)\",\n \"spinph\"]))\n ipotlist = ipotrow.replace(\"--\", \"**\")\n ipotlist = ''.join([\"POTENTIALS\\n\", ipotlist])\n\n return ipotlist\n\n def write_file(self, filename='POTENTIALS'):\n \"\"\"\n Write to file.\n\n Args:\n filename: filename and path to write potential file to.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(str(self) + \"\\n\")\n\n\nclass Paths(MSONable):\n \"\"\"\n Set FEFF scattering paths('paths.dat' file used by the 'genfmt' module).\n \"\"\"\n\n def __init__(self, atoms, paths, degeneracies=None):\n \"\"\"\n Args:\n atoms (Atoms): Atoms object\n paths (list(list)): list of paths. Each path is a list of atom indices in the atomic\n cluster(the molecular cluster created by Atoms class).\n e.g. [[0, 1, 2], [5, 9, 4, 1]] -> 2 paths: one with 3 legs and the other with 4 legs.\n degeneracies (list): list of degeneracies, one for each path. Set to 1 if not specified.\n \"\"\"\n self.atoms = atoms\n self.paths = paths\n self.degeneracies = degeneracies or [1] * len(paths)\n assert len(self.degeneracies) == len(self.paths)\n\n def __str__(self):\n lines = [\"PATH\", \"---------------\"]\n # max possible, to avoid name collision count down from max value.\n path_index = 9999\n for i, legs in enumerate(self.paths):\n lines.append(\"{} {} {}\".format(path_index, len(legs), self.degeneracies[i]))\n lines.append(\"x y z ipot label\")\n for l in legs:\n coords = self.atoms.cluster[l].coords.tolist()\n tmp = \"{:.6f} {:.6f} {:.6f}\".format(*tuple(coords))\n element = str(self.atoms.cluster[l].specie.name)\n # the potential index for the absorbing atom(the one at the cluster origin) is 0\n potential = 0 if np.linalg.norm(coords) <= 1e-6 else self.atoms.pot_dict[element]\n tmp = \"{} {} {}\".format(tmp, potential, element)\n lines.append(tmp)\n path_index -= 1\n return \"\\n\".join(lines)\n\n def write_file(self, filename=\"paths.dat\"):\n \"\"\"\n Write paths.dat.\n \"\"\"\n with zopen(filename, \"wt\") as f:\n f.write(str(self) + \"\\n\")\n\n\nclass FeffParserError(Exception):\n \"\"\"\n Exception class for Structure.\n Raised when the structure has problems, e.g., atoms that are too close.\n \"\"\"\n\n def __init__(self, msg):\n self.msg = msg\n\n def __str__(self):\n return \"FeffParserError : \" + self.msg\n\n\ndef get_atom_map(structure):\n \"\"\"\n Returns a dict that maps each atomic symbol to a unique integer starting\n from 1.\n\n Args:\n structure (Structure)\n\n Returns:\n dict\n \"\"\"\n syms = [site.specie.symbol for site in structure]\n unique_pot_atoms = []\n [unique_pot_atoms.append(i) for i in syms if not unique_pot_atoms.count(i)]\n atom_map = {}\n for i, atom in enumerate(unique_pot_atoms):\n atom_map[atom] = i + 1\n return atom_map\n\n\ndef 
get_absorbing_atom_symbol_index(absorbing_atom, structure):\n \"\"\"\n Return the absorbing atom symbol and site index in the given structure.\n\n Args:\n absorbing_atom (str/int): symbol or site index\n structure (Structure)\n\n Returns:\n str, int: symbol and site index\n \"\"\"\n if isinstance(absorbing_atom, str):\n return absorbing_atom, structure.indices_from_symbol(absorbing_atom)[0]\n elif isinstance(absorbing_atom, int):\n return str(structure[absorbing_atom].specie), absorbing_atom\n else:\n raise ValueError(\"absorbing_atom must be either specie symbol or site index\")\n"
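A minimal sketch, not part of the dataset row: exercising the Tags class defined above. The import path assumes the file is pymatgen's pymatgen/io/feff/inputs.py; the tag values are arbitrary placeholders.

# Illustrative sketch, not part of the dataset row.
from pymatgen.io.feff.inputs import Tags  # assumed module path

tags = Tags({"CONTROL": [1, 1, 1, 1, 1, 1], "PRINT": [1, 0, 0, 0, 0, 0]})
tags["S02"] = 0.9    # __setitem__ warns if the key is not in VALID_FEFF_TAGS
tags["RPATH"] = 10.0

print(tags.get_string(sort_keys=True, pretty=True))
tags.write_file("PARAMETERS")   # round-trips via Tags.from_file("PARAMETERS")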
] | [
[
"numpy.dot",
"numpy.array_equal",
"numpy.multiply",
"numpy.around",
"numpy.eye",
"numpy.subtract",
"scipy.stats.describe",
"scipy.spatial.KDTree",
"numpy.add",
"numpy.array"
],
[
"numpy.linalg.det",
"numpy.array",
"numpy.average",
"numpy.sum"
],
[
"numpy.dot",
"numpy.sum",
"numpy.eye",
"numpy.ones",
"scipy.linalg.cholesky",
"numpy.identity",
"numpy.transpose",
"scipy.linalg.sqrtm",
"numpy.array",
"numpy.zeros",
"numpy.trace"
],
[
"numpy.linalg.norm"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mpopescu/compas | [
"55f259607deea501f862cbaea79bd97d7e56ead6",
"55f259607deea501f862cbaea79bd97d7e56ead6"
] | [
"src/compas/numerical/pca/pca_numpy.py",
"tests/compas/geometry/test_transformations/test_transformations.py"
] | [
"from __future__ import print_function\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nfrom numpy import asarray\nfrom scipy.linalg import svd\n\n\n__all__ = ['pca_numpy']\n\n\ndef pca_numpy(data):\n \"\"\"Compute the principle components of a set of data points.\n\n Parameters\n ----------\n data : list\n A list of `m` observations, measuring `n` variables.\n For example, if the data are points in 2D space, the data parameter\n should contain `m` nested lists of `2` variables, the `x` and `y`\n coordinates.\n\n Returns\n -------\n tuple\n * The ``mean of the data points``.\n * The principle directions.\n The number of principle directions is equal to the dimensionality of the data.\n For example, if the data points are locations in 3D space, three principle components will be returned.\n If the data points are locations in 2D space, only two principle components will be returned.\n * The *spread* of the data along the principle directions.\n\n Notes\n -----\n PCA of a dataset finds the directions along which the variance of the data\n is largest, i.e. the directions along which the data is most spread out.\n\n Examples\n --------\n >>>\n\n \"\"\"\n X = asarray(data)\n n, dim = X.shape\n\n assert n >= dim, \"The number of observations (n) should be higher than the number of measured variables (dimensions).\"\n\n # the average of the observations for each of the variables\n # for example, if the data are 2D point coordinates,\n # the average is the average of the x-coordinate across all observations\n # and the average of the y-coordinate across all observations\n mean = (X.sum(axis=0) / n).reshape((-1, dim))\n\n # the spread matrix\n # i.e. the variation of each variable compared to the average of the variable\n # across all observations\n Y = X - mean\n\n # covariance matrix of spread\n # note: there is a covariance function in NumPy...\n # the shape of the covariance matrix is dim x dim\n # for example, if the data are 2D point coordinates, the shape of C is 2 x 2\n # the diagonal of the covariance matrix contains the variance of each variable\n # the off-diagonal elements of the covariance matrix contain the covariance\n # of two independent variables\n C = Y.T.dot(Y) / (n - 1)\n\n assert C.shape[0] == dim, \"The shape of the covariance matrix is not correct.\"\n\n # SVD of covariance matrix\n u, s, vT = svd(C, full_matrices=False)\n\n # eigenvectors\n # ------------\n # note: the eigenvectors are normalized\n # note: vT is exactly what it says it will be => the transposed eigenvectors\n # => take the rows of vT, or the columns of v\n # the right-singular vectors of C (the columns of V or the rows of Vt)\n # are the eigenvectors of CtC\n eigenvectors = vT\n\n # eigenvalues\n # -----------\n # the nonzero singular values of C are the square roots\n # of the nonzero eigenvalues of CtC and CCt\n eigenvalues = s\n\n # return\n return mean[0], eigenvectors, eigenvalues\n\n\n# ==============================================================================\n# Main\n# ==============================================================================\n\nif __name__ == \"__main__\":\n\n import doctest\n\n doctest.testmod(globs=globals())\n",
"import pytest\n\n# from compas.geometry.transformations import homogenize\n# from compas.geometry.transformations import dehomogenize\nfrom compas.geometry.transformations import transform_points\nfrom compas.geometry.transformations import transform_vectors\nfrom compas.geometry.transformations import translate_points\nfrom compas.geometry.transformations import translate_points_xy\nfrom compas.geometry.transformations import scale_points\nfrom compas.geometry.transformations import scale_points_xy\nfrom compas.geometry.transformations import rotate_points\nfrom compas.geometry.transformations import rotate_points_xy\nfrom compas.geometry.transformations import mirror_vector_vector\nfrom compas.geometry.transformations import mirror_points_point\nfrom compas.geometry.transformations import mirror_points_point_xy\nfrom compas.geometry.transformations import mirror_points_line\nfrom compas.geometry.transformations import mirror_points_line_xy\nfrom compas.geometry.transformations import mirror_points_plane\nfrom compas.geometry.transformations import project_point_plane\nfrom compas.geometry.transformations import project_points_plane\nfrom compas.geometry.transformations import project_point_line\nfrom compas.geometry.transformations import project_point_line_xy\nfrom compas.geometry.transformations import project_points_line\nfrom compas.geometry.transformations import project_points_line_xy\nfrom compas.geometry.transformations import reflect_line_plane\nfrom compas.geometry.transformations import reflect_line_triangle\nfrom compas.geometry.transformations import orient_points\n\nfrom compas.geometry.transformations import Translation\nfrom compas.geometry.transformations import Rotation\n\nfrom compas.geometry import intersection_segment_segment_xy\nimport numpy as np\n\n\[email protected]\ndef T():\n return Translation([1, 2, 3])\n\n\[email protected]\ndef R():\n return Rotation.from_euler_angles([90, 0, 0])\n\n\ndef test_transform_points(T):\n assert transform_points([[0, 0, 1], [1, 0, 0]], T) == [[1.0, 2.0, 4.0], [2.0, 2.0, 3.0]]\n\n\ndef test_transform_vectors(R):\n assert transform_vectors([[1, 2, 3], [5, 6, 7]], R) == [[1.0, -3.5781372230600135, 0.44377247881360526], [5.0, -8.946418341978926, 2.227464668699156]]\n\n\n# def test_homogenize():\n# assert homogenize([[1, 2, 3]], 0.5) == [[0.5, 1.0, 1.5, 0.5]]\n\n\n# def test_dehomogenize():\n# assert dehomogenize([[0.5, 1.0, 1.5, 0.5]]) == [[1, 2, 3]]\n\n\n# def test_local_axes():\n# pass\n\n\ndef test_translate_points():\n assert translate_points([[0, 1, 2]], [3, 4, 5]) == [[3, 5, 7]]\n\n\ndef test_translate_points_xy():\n assert translate_points_xy([[0, 1, 2]], [3, 4, 5]) == [[3, 5, 0.0]]\n\n\ndef test_scale_points():\n assert scale_points([[0, 1, 2]], 5) == [[0, 5, 10]]\n\n\ndef test_scale_points_xy():\n assert scale_points_xy([[0, 1, 2]], 5) == [[0.0, 5.0, 0.0]]\n\n\ndef test_rotate_points():\n assert np.allclose(rotate_points([[0, 1, 2]], 1), [[-0.8414709848078965, 0.5403023058681398, 2.0]])\n\n\ndef test_rotate_points_xy():\n assert np.allclose(rotate_points_xy([[0, 1, 2]], 1), [[-0.8414709848078965, 0.5403023058681398, 0.0]])\n\n\ndef test_mirror_vector_vector():\n assert mirror_vector_vector([0, 1, 2], [3, 4, 5]) == [-84, -111, -138]\n # TODO: is this correct?\n\n\ndef test_mirror_points_point():\n assert mirror_points_point([[0, 1, 2]], [3, 4, 5]) == [[6, 7, 8]]\n\n\ndef test_mirror_points_point_xy():\n assert mirror_points_point_xy([[0, 1, 2]], [3, 4, 5]) == [[6, 7, 0.0]]\n\n\ndef test_mirror_points_line():\n assert 
np.allclose(mirror_points_line([[0, 1, 2]], ([3, 4, 5], [6, 7, 8.8])), [[0.281134401972873, 1.281134401972873, 1.5561035758323052]])\n\n\ndef test_mirror_points_line_xy():\n assert mirror_points_line_xy([[0, 2.5, 2]], ([3, 4, 5], [6, 7, 8.8])) == [[0.75, 1.75, 0.0]]\n\n\ndef test_mirror_points_plane():\n assert np.allclose(mirror_points_plane([[0, 2.5, 2]], ([3, 4, 5], [6, 7, 8.8])), [[4.055651317409505, 7.231593203644422, 7.948288598867276]])\n\n\ndef test_project_point_plane():\n assert np.allclose(project_point_plane([0, 2.5, 2], ([3, 4, 5], [6, 7, 8.8])), [2.0278256587047525, 4.865796601822211, 4.974144299433638])\n\n\ndef test_project_points_plane():\n assert np.allclose(project_points_plane([[0, 2.5, 2]], ([3, 4, 5], [6, 7, 8.8])), [[2.0278256587047525, 4.865796601822211, 4.974144299433638]])\n\n\ndef test_project_point_line():\n assert np.allclose(project_point_line([0, 1, 2], ([3, 4, 5], [6, 7, 8.8])), [0.281134401972873, 1.281134401972873, 1.5561035758323052])\n\n\ndef test_project_point_line_xy():\n assert project_point_line_xy([0, 1, 2], ([3, 4, 5], [6, 7, 8.8])) == [0.0, 1.0, 0.0]\n # TODO: is this correct?\n\n\ndef test_project_points_line():\n assert np.allclose(project_points_line([[0, 1, 2]], ([3, 4, 5], [6, 7, 8.8])), [[0.281134401972873, 1.281134401972873, 1.5561035758323052]])\n\n\ndef test_project_points_line_xy():\n assert project_points_line_xy([[0, 1, 2]], ([3, 4, 5], [6, 7, 8.8])) == [[0.0, 1.0, 0.0]]\n # TODO: is this correct?\n\n\ndef test_reflect_line_plane():\n plane = [0, 0, 0], [0, 1, 0]\n line = [-1, 1, 0], [-0.5, 0.5, 0]\n assert reflect_line_plane(line, plane) == ([0.0, 0.0, 0.0], [1.0, 1.0, 0.0])\n\n\ndef test_reflect_line_triangle():\n triangle = [1.0, 0, 0], [-1.0, 0, 0], [0, 0, 1.0]\n line = [-1, 1, 0], [-0.5, 0.5, 0]\n assert reflect_line_triangle(line, triangle) == ([0.0, 0.0, 0], [1.0, 1.0, 0])\n\n\ndef test_orient_points():\n refplane = ([0.57735, 0.57735, 0.57735], [1.0, 1.0, 1.0])\n tarplane = ([0.0, 0.0, 0.0], [0.0, 0.0, 1.0])\n\n points = [\n [0.288675, 0.288675, 1.1547],\n [0.866025, 0.866025, 0.0],\n [1.077350, 0.077350, 0.57735],\n [0.077350, 1.077350, 0.57735]\n ]\n\n points = orient_points(points, refplane, tarplane)\n\n ab = points[0], points[1]\n cd = points[2], points[3]\n\n point = intersection_segment_segment_xy(ab, cd)\n\n points = orient_points([point], tarplane, refplane)\n\n assert np.allclose(points[0], [0.57735, 0.57735, 0.57735])\n"
] | [
[
"numpy.asarray",
"scipy.linalg.svd"
],
[
"numpy.allclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dasolhwang/tf-mobilenet-v2 | [
"e0e8f936e63e14561d9d25b77256d1cadb85172a"
] | [
"mobilenet_v2.py"
] | [
"\"\"\"\nMobileNet v2.\n\nAs described in https://arxiv.org/abs/1801.04381\n\n Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple, OrderedDict\n\nimport functools\nimport tensorflow as tf\n\nslim = tf.contrib.slim\n\nConv = namedtuple('Conv', ['kernel', 'stride', 'channel'])\nInvertedBottleneck = namedtuple('InvertedBottleneck', ['up_sample', 'channel', 'stride', 'repeat'])\n\n# Sequence of layers, described in Table 2\n_CONV_DEFS = [\n Conv(kernel=[3, 3], stride=2, channel=32), # first block, input 224x224x3\n InvertedBottleneck(up_sample=1, channel=16, stride=1, repeat=1), # second block, input : 112x112x32\n InvertedBottleneck(up_sample=6, channel=24, stride=2, repeat=2), # third block, input: 112x112x16\n InvertedBottleneck(up_sample=6, channel=32, stride=2, repeat=3), # fourth block, input: 56x56x24\n InvertedBottleneck(up_sample=6, channel=64, stride=2, repeat=4), # fifth block, input: 28x28x32\n InvertedBottleneck(up_sample=6, channel=96, stride=1, repeat=3), # sixth block, input: 28x28x64\n InvertedBottleneck(up_sample=6, channel=160, stride=2, repeat=3), # seventh block, input: 14x14x96\n InvertedBottleneck(up_sample=6, channel=320, stride=1, repeat=1), # eighth block, input: 7x7x160\n Conv(kernel=[1, 1], stride=1, channel=1280),\n # AvgPool(kernel=[7, 7]),\n # Conv(kernel=[1, 1], stride=1, channel='num_class')\n]\n\n\ndef mobilenet_v2_base(inputs,\n final_endpoint='Conv2d_13_pointwise',\n min_depth=8,\n depth_multiplier=1.0,\n conv_defs=None,\n scope=None):\n if depth_multiplier <= 0:\n raise ValueError('depth_multiplier is not greater than zero.')\n\n depth = lambda d: max(int(d * depth_multiplier), min_depth)\n end_points = OrderedDict()\n\n if conv_defs is None:\n conv_defs = _CONV_DEFS\n\n net = inputs\n with tf.variable_scope(scope, 'MobilenetV2', [inputs]):\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d], padding='SAME'):\n for i, conv_def in enumerate(conv_defs):\n\n end_point = ''\n if isinstance(conv_def, Conv):\n end_point = 'Conv2d_%d' % i\n num_channel = depth(conv_def.channel)\n net = slim.conv2d(net, num_channel, conv_def.kernel,\n activation_fn=tf.nn.relu6,\n stride=conv_def.stride,\n scope=end_point)\n end_points[end_point] = net\n elif isinstance(conv_def, InvertedBottleneck):\n stride = conv_def.stride\n\n if conv_def.repeat <= 0:\n raise ValueError('repeat value of inverted bottleneck should be greater than zero.')\n\n for j in range(conv_def.repeat):\n end_point = 'InvertedBottleneck_%d_%d' % (i, j)\n prev_output = net\n net = slim.conv2d(net, conv_def.up_sample * net.get_shape().as_list()[-1], [1, 1],\n activation_fn=tf.nn.relu6,\n scope=end_point + '_inverted_bottleneck')\n end_points[end_point + '_inverted_bottleneck'] = net\n net = slim.separable_conv2d(net, None, [3, 3],\n depth_multiplier=1,\n stride=stride,\n activation_fn=tf.nn.relu6,\n scope=end_point + '_dwise')\n end_points[end_point + '_dwise'] = net\n\n num_channel = depth(conv_def.channel)\n net = slim.conv2d(net, num_channel, [1, 1],\n activation_fn=None,\n scope=end_point + '_linear')\n end_points[end_point + '_linear'] = net\n\n if stride == 1:\n if prev_output.get_shape().as_list()[-1] != net.get_shape().as_list()[-1]:\n # Assumption based on previous ResNet papers: If the number of filters doesn't match,\n # there should be a conv 1x1 operation.\n # reference(pytorch) 
: https://github.com/MG2033/MobileNet-V2/blob/master/layers.py#L29\n prev_output = slim.conv2d(prev_output, num_channel, [1, 1],\n activation_fn=None,\n biases_initializer=None,\n scope=end_point + '_residual_match')\n\n # as described in Figure 4.\n net = tf.add(prev_output, net, name=end_point + '_residual_add')\n end_points[end_point + '_residual_add'] = net\n\n stride = 1\n else:\n raise ValueError('CONV_DEF is not valid.')\n\n if end_point == final_endpoint:\n break\n\n return net, end_points\n\n\ndef mobilenet_v2_cls(inputs,\n num_classes=1000,\n dropout_keep_prob=0.999,\n is_training=True,\n min_depth=8,\n depth_multiplier=1.0,\n conv_defs=None,\n prediction_fn=tf.contrib.layers.softmax,\n reuse=None,\n scope='MobilenetV2'):\n input_shape = inputs.get_shape().as_list()\n if len(input_shape) != 4:\n raise ValueError('Invalid input tensor rank, expected 4, was: %d' %\n len(input_shape))\n\n with tf.variable_scope(scope, 'MobilenetV2', [inputs], reuse=reuse) as scope:\n with slim.arg_scope([slim.batch_norm, slim.dropout], is_training=is_training):\n net, end_points = mobilenet_v2_base(inputs, scope=scope,\n min_depth=min_depth,\n depth_multiplier=depth_multiplier,\n conv_defs=conv_defs)\n with tf.variable_scope('Logits'):\n # class\n if num_classes:\n net = slim.dropout(net, keep_prob=dropout_keep_prob, is_training=is_training, scope='Dropout_1')\n # global pool\n # Issue #1 : https://github.com/ildoonet/tf-mobilenet-v2/issues/1\n net = tf.reduce_mean(net, [1, 2], keepdims=True, name='Global_pool')\n end_points['Global_pool'] = net\n\n # classification\n net = slim.dropout(net, keep_prob=dropout_keep_prob, is_training=is_training, scope='Dropout_2')\n net = slim.conv2d(net, num_classes, [1, 1], activation_fn=None,\n normalizer_fn=None, scope='Conv2d_1c_1x1')\n net = slim.flatten(net, scope='Flatten')\n end_points['Logits'] = net\n\n if prediction_fn:\n end_points['Predictions'] = prediction_fn(net, scope='Predictions')\n\n return net, end_points\n\n\ndef wrapped_partial(func, *args, **kwargs):\n partial_func = functools.partial(func, *args, **kwargs)\n functools.update_wrapper(partial_func, func)\n return partial_func\n\n\nmobilenet_v2_cls_075 = wrapped_partial(mobilenet_v2_cls, depth_multiplier=0.75)\nmobilenet_v2_cls_050 = wrapped_partial(mobilenet_v2_cls, depth_multiplier=0.50)\nmobilenet_v2_cls_025 = wrapped_partial(mobilenet_v2_cls, depth_multiplier=0.25)\n\n\ndef mobilenet_v2_arg_scope(is_training=True,\n weight_decay=0.0004,\n stddev=0.01,\n regularize_depthwise=False):\n \"\"\"Defines the default MobilenetV2 arg scope.\n Args:\n is_training: Whether or not we're training the model.\n weight_decay: The weight decay to use for regularizing the model.\n stddev: The standard deviation of the trunctated normal weight initializer.\n regularize_depthwise: Whether or not apply regularization on depthwise.\n Returns:\n An `arg_scope` to use for the mobilenet v2 model.\n \"\"\"\n batch_norm_params = {\n 'is_training': is_training,\n 'center': True,\n 'scale': True,\n 'decay': 0.999,\n 'epsilon': 0.0001,\n 'fused': True,\n 'zero_debias_moving_mean': True\n }\n\n # Set weight_decay for weights in Conv and DepthSepConv layers.\n weights_init = tf.truncated_normal_initializer(stddev=stddev)\n regularizer = tf.contrib.layers.l2_regularizer(weight_decay)\n if regularize_depthwise:\n depthwise_regularizer = regularizer\n else:\n depthwise_regularizer = None\n with slim.arg_scope([slim.conv2d, slim.separable_conv2d],\n weights_initializer=weights_init,\n normalizer_fn=slim.batch_norm,\n 
normalizer_params=batch_norm_params):\n with slim.arg_scope([slim.batch_norm], **batch_norm_params):\n with slim.arg_scope([slim.conv2d], weights_regularizer=regularizer):\n with slim.arg_scope([slim.separable_conv2d], weights_regularizer=depthwise_regularizer) as sc:\n return sc\n"
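A minimal graph-mode sketch, not part of the dataset row: building the classifier under the arg scope defined above. It mirrors the TF1/tf.contrib.slim APIs this file itself uses; the module name in the import follows the file_path field (mobilenet_v2.py), and the shapes assume the 224x224x3 input of the conv-def table.

# Illustrative TF1-era sketch, not part of the dataset row.
import tensorflow as tf
from mobilenet_v2 import mobilenet_v2_cls, mobilenet_v2_arg_scope

slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope(mobilenet_v2_arg_scope(is_training=False)):
    logits, end_points = mobilenet_v2_cls(inputs, num_classes=1000,
                                          is_training=False)

print(logits.get_shape().as_list())   # [None, 1000]
print(list(end_points)[:2])           # 'Conv2d_0', 'InvertedBottleneck_1_0_inverted_bottleneck'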
] | [
[
"tensorflow.reduce_mean",
"tensorflow.truncated_normal_initializer",
"tensorflow.add",
"tensorflow.contrib.layers.l2_regularizer",
"tensorflow.variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
JuliaSprenger/spikeinterface | [
"d5d3d3992a6d430d7008e16db4ee030734e685e5",
"d5d3d3992a6d430d7008e16db4ee030734e685e5"
] | [
"spikeinterface/widgets/unitlocalization.py",
"examples/getting_started/plot_getting_started.py"
] | [
"import numpy as np\nimport matplotlib.pylab as plt\nfrom .basewidget import BaseWidget\n\nfrom probeinterface.plotting import plot_probe\n\nfrom spikeinterface.toolkit import compute_unit_centers_of_mass\n\nfrom .utils import get_unit_colors\n\n\nclass UnitLocalizationWidget(BaseWidget):\n \"\"\"\n Plot unit localization on probe.\n\n Parameters\n ----------\n waveform_extractor: WaveformaExtractor\n WaveformaExtractorr object\n peaks: None or numpy array\n Optionally can give already detected peaks\n to avoid multiple computation.\n unit_localisation: None or 2d array\n If None then it is computed with 'method' option\n method: str default 'center_of_mass'\n Method used to estimate unit localization if 'unit_localisation' is None\n method_kwargs: dict\n Option for the method\n unit_colors: None or dict\n A dict key is unit_id and value is any color format handled by matplotlib.\n If None, then the get_unit_colors() is internally used.\n figure: matplotlib figure\n The figure to be used. If not given a figure is created\n ax: matplotlib axis\n The axis to be used. If not given an axis is created\n\n Returns\n -------\n W: ProbeMapWidget\n The output widget\n \"\"\"\n\n def __init__(self, waveform_extractor, unit_localisation=None,\n method='center_of_mass', method_kwargs={'peak_sign': 'neg', 'num_channels': 10},\n unit_colors=None,\n figure=None, ax=None):\n BaseWidget.__init__(self, figure, ax)\n\n self.waveform_extractor = waveform_extractor\n self.unit_localisation = unit_localisation\n self.method = method\n self.method_kwargs = method_kwargs\n\n if unit_colors is None:\n unit_colors = get_unit_colors(waveform_extractor.sorting)\n self.unit_colors = unit_colors\n\n def plot(self):\n we = self.waveform_extractor\n unit_localisation = self.unit_localisation\n unit_ids = we.sorting.unit_ids\n\n if unit_localisation is None:\n assert self.method in ('center_of_mass',)\n\n if self.method == 'center_of_mass':\n coms = compute_unit_centers_of_mass(we, **self.method_kwargs)\n localisation = np.array([e for e in coms.values()])\n else:\n raise ValueError('UnitLocalizationWidget: method not implemented.')\n\n ax = self.ax\n probe = we.recording.get_probe()\n probe_shape_kwargs = dict(facecolor='w', edgecolor='k', lw=0.5, alpha=1.)\n contacts_kargs = dict(alpha=1., edgecolor='k', lw=0.5)\n poly_contact, poly_contour = plot_probe(probe, ax=ax,\n contacts_colors='w', contacts_kargs=contacts_kargs,\n probe_shape_kwargs=probe_shape_kwargs)\n poly_contact.set_zorder(2)\n if poly_contour is not None:\n poly_contour.set_zorder(1)\n\n ax.set_title('')\n\n color = np.array([self.unit_colors[unit_id] for unit_id in unit_ids])\n loc = ax.scatter(localisation[:, 0], localisation[:, 1], marker='1', color=color, s=80, lw=3)\n loc.set_zorder(3)\n\n\ndef plot_unit_localization(*args, **kwargs):\n W = UnitLocalizationWidget(*args, **kwargs)\n W.plot()\n return W\n\n\nplot_unit_localization.__doc__ = UnitLocalizationWidget.__doc__\n",
"\"\"\"\nGetting started tutorial\n========================\n\nIn this introductory example, you will see how to use the :code:`spikeinterface` to perform a full electrophysiology analysis.\nWe will first create some simulated data, and we will then perform some pre-processing, run a couple of spike sorting\nalgorithms, inspect and validate the results, export to Phy, and compare spike sorters.\n\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\n##############################################################################\n# The spikeinterface module by itself import only the spikeinterface.core submodule\n# which is not useful for end user\n\nimport spikeinterface\n\n##############################################################################\n# We need to import one by one different submodules separately (preferred).\n# There are 5 modules:\n#\n# - :code:`extractors` : file IO\n# - :code:`toolkit` : processing toolkit for pre-, post-processing, validation, and automatic curation\n# - :code:`sorters` : Python wrappers of spike sorters\n# - :code:`comparison` : comparison of spike sorting output\n# - :code:`widgets` : visualization\n\nimport spikeinterface as si # import core only\nimport spikeinterface.extractors as se\nimport spikeinterface.toolkit as st\nimport spikeinterface.sorters as ss\nimport spikeinterface.comparison as sc\nimport spikeinterface.widgets as sw\n\n##############################################################################\n# We can also import all submodules at once with this\n# this internally import core+extractors+toolkit+sorters+comparison+widgets+exporters\n#\n# This is useful for notebooks but this is a more heavy import because internally many more dependency\n# are imported (scipy/sklearn/networkx/matplotlib/h5py...)\n\nimport spikeinterface.full as si\n\n##############################################################################\n# First, let's download a simulated dataset from the\n# 'https://gin.g-node.org/NeuralEnsemble/ephy_testing_data' repo\n#\n# Then we can open it. Note that `MEArec <https://mearec.readthedocs.io>`_ simulated file\n# contains both \"recording\" and a \"sorting\" object.\n\nlocal_path = si.download_dataset(remote_path='mearec/mearec_test_10s.h5')\nrecording, sorting_true = se.read_mearec(local_path)\nprint(recording)\nprint(sorting_true)\n\n##############################################################################\n# :code:`recording` is a :code:`RecordingExtractor` object, which extracts information about channel ids, channel locations\n# (if present), the sampling frequency of the recording, and the extracellular traces. :code:`sorting_true` is a\n# :code:`SortingExtractor` object, which contains information about spike-sorting related information, including unit ids,\n# spike trains, etc. 
Since the data are simulated, :code:`sorting_true` has ground-truth information of the spiking\n# activity of each unit.\n#\n# Let's use the :code:`widgets` module to visualize the traces and the raster plots.\n\nw_ts = sw.plot_timeseries(recording, time_range=(0, 5))\nw_rs = sw.plot_rasters(sorting_true, time_range=(0, 5))\n\n##############################################################################\n# This is how you retrieve info from a :code:`RecordingExtractor`...\n\nchannel_ids = recording.get_channel_ids()\nfs = recording.get_sampling_frequency()\nnum_chan = recording.get_num_channels()\nnum_seg = recording.get_num_segments()\n\nprint('Channel ids:', channel_ids)\nprint('Sampling frequency:', fs)\nprint('Number of channels:', num_chan)\nprint('Number of segments:', num_seg)\n\n##############################################################################\n# ...and a :code:`SortingExtractor`\n\nnum_seg = recording.get_num_segments()\nunit_ids = sorting_true.get_unit_ids()\nspike_train = sorting_true.get_unit_spike_train(unit_id=unit_ids[0])\n\nprint('Number of segments:', num_seg)\nprint('Unit ids:', unit_ids)\nprint('Spike train of first unit:', spike_train)\n\n##################################################################\n# :code:`spikeinterface` internally uses the :code:`probeinterface`\n# to handle Probe and ProbeGroup.\n# So any probe in the probeinterface collections can be download\n# and set to a Recording object.\n# In this case, the MEArec dataset already handles a Probe and we don't need to set it.\n\nprobe = recording.get_probe()\nprint(probe)\n\nfrom probeinterface.plotting import plot_probe\n\nplot_probe(probe)\n\n##############################################################################\n# Using the :code:`toolkit`, you can perform preprocessing on the recordings.\n# Each pre-processing function also returns a :code:`RecordingExtractor`,\n# which makes it easy to build pipelines. Here, we filter the recording and\n# apply common median reference (CMR).\n# All theses preprocessing steps are \"lazy\". 
The computation is done on demand when we call\n# `recording.get_traces(...)` or when we save the object to disk.\n\nrecording_cmr = recording\nrecording_f = st.bandpass_filter(recording, freq_min=300, freq_max=6000)\nprint(recording_f)\nrecording_cmr = st.common_reference(recording_f, reference='global', operator='median')\nprint(recording_cmr)\n\n# this computes and saves the recording after applying the preprocessing chain\nrecording_preprocessed = recording_cmr.save(format='binary')\nprint(recording_preprocessed)\n\n##############################################################################\n# Now you are ready to spike sort using the :code:`sorters` module!\n# Let's first check which sorters are implemented and which are installed\n\nprint('Available sorters', ss.available_sorters())\nprint('Installed sorters', ss.installed_sorters())\n\n##############################################################################\n# The :code:`ss.installed_sorters()` will list the sorters installed in the machine.\n# We can see we have HerdingSpikes and Tridesclous installed.\n# Spike sorters come with a set of parameters that users can change.\n# The available parameters are dictionaries and can be accessed with:\n\nprint(ss.get_default_params('herdingspikes'))\nprint(ss.get_default_params('tridesclous'))\n\n##############################################################################\n# Let's run herdingspikes and change one of the parameter, say, the detect_threshold:\n\nsorting_HS = ss.run_herdingspikes(recording=recording_preprocessed, detect_threshold=4)\nprint(sorting_HS)\n\n##############################################################################\n# Alternatively we can pass full dictionary containing the parameters:\n\nother_params = ss.get_default_params('herdingspikes')\nother_params['detect_threshold'] = 5\n\n# parameters set by params dictionary\nsorting_HS_2 = ss.run_herdingspikes(recording=recording_preprocessed, output_folder=\"redringspikes_output2\",\n **other_params)\nprint(sorting_HS_2)\n\n##############################################################################\n# Let's run tridesclous as well, with default parameters:\n\nsorting_TDC = ss.run_tridesclous(recording=recording_preprocessed)\n\n##############################################################################\n# The :code:`sorting_HS` and :code:`sorting_TDC` are :code:`SortingExtractor`\n# objects. We can print the units found using:\n\nprint('Units found by herdingspikes:', sorting_HS.get_unit_ids())\nprint('Units found by tridesclous:', sorting_TDC.get_unit_ids())\n\n##############################################################################\n# :code:`spikeinterface` provides a efficient way to extractor waveform snippets from paired recording/sorting objects.\n# The :code:`WaveformExtractor` class samples some spikes (:code:`max_spikes_per_unit=500`) for each cluster and stores\n# them on disk. 
These waveforms per cluster are helpful to compute the average waveform, or \"template\", for each unit\n# and then to compute, for example, quality metrics.\n\nwe_TDC = si.WaveformExtractor.create(recording_preprocessed, sorting_TDC, 'waveforms', remove_if_exists=True)\nwe_TDC.set_params(ms_before=3., ms_after=4., max_spikes_per_unit=500)\nwe_TDC.run(n_jobs=-1, chunk_size=30000)\nprint(we_TDC)\n\nunit_id0 = sorting_TDC.unit_ids[0]\nwavefroms = we_TDC.get_waveforms(unit_id0)\nprint(wavefroms.shape)\n\ntemplate = we_TDC.get_template(unit_id0)\nprint(template.shape)\n\n##############################################################################\n# Once we have the `WaveformExtractor` object\n# we can post-process, validate, and curate the results. With\n# the :code:`toolkit.postprocessing` submodule, one can, for example,\n# get waveforms, templates, maximum channels, PCA scores, or export the data\n# to Phy. `Phy <https://github.com/cortex-lab/phy>`_ is a GUI for manual\n# curation of the spike sorting output. To export to phy you can run:\n\nfrom spikeinterface.exporters import export_to_phy\n\nexport_to_phy(we_TDC, './phy_folder_for_TDC',\n compute_pc_features=False, compute_amplitudes=True)\n\n##############################################################################\n# Then you can run the template-gui with: :code:`phy template-gui phy/params.py`\n# and manually curate the results.\n\n\n##############################################################################\n# Quality metrics for the spike sorting output are very important to asses the spike sorting performance.\n# The :code:`spikeinterface.toolkit.qualitymetrics` module implements several quality metrics\n# to assess the goodness of sorted units. Among those, for example,\n# are signal-to-noise ratio, ISI violation ratio, isolation distance, and many more.\n# Theses metrics are built on top of WaveformExtractor class and return a dictionary with the unit ids as keys:\n\nsnrs = st.compute_snrs(we_TDC)\nprint(snrs)\nisi_violations_rate, isi_violations_count = st.compute_isi_violations(we_TDC, isi_threshold_ms=1.5)\nprint(isi_violations_rate)\nprint(isi_violations_count)\n\n##############################################################################\n# All theses quality metrics can be computed in one shot and returned as\n# a :code:`pandas.Dataframe`\n\nmetrics = st.compute_quality_metrics(we_TDC, metric_names=['snr', 'isi_violation', 'amplitude_cutoff'])\nprint(metrics)\n\n##############################################################################\n# Quality metrics can be also used to automatically curate the spike sorting\n# output. 
For example, you can select sorted units with a SNR above a\n# certain threshold:\n\nkeep_mask = (metrics['snr'] > 7.5) & (metrics['isi_violations_rate'] < 0.01)\nprint(keep_mask)\n\nkeep_unit_ids = keep_mask[keep_mask].index.values\nprint(keep_unit_ids)\n\ncurated_sorting = sorting_TDC.select_units(keep_unit_ids)\nprint(curated_sorting)\n\n##############################################################################\n# The final part of this tutorial deals with comparing spike sorting outputs.\n# We can either (1) compare the spike sorting results with the ground-truth\n# sorting :code:`sorting_true`, (2) compare the output of two (HerdingSpikes\n# and Tridesclous), or (3) compare the output of multiple sorters:\n\ncomp_gt_TDC = sc.compare_sorter_to_ground_truth(gt_sorting=sorting_true, tested_sorting=sorting_TDC)\ncomp_TDC_HS = sc.compare_two_sorters(sorting1=sorting_TDC, sorting2=sorting_HS)\ncomp_multi = sc.compare_multiple_sorters(sorting_list=[sorting_TDC, sorting_HS],\n name_list=['tdc', 'hs'])\n\n##############################################################################\n# When comparing with a ground-truth sorting extractor (1), you can get the sorting performance and plot a confusion\n# matrix\n\ncomp_gt_TDC.get_performance()\nw_conf = sw.plot_confusion_matrix(comp_gt_TDC)\nw_agr = sw.plot_agreement_matrix(comp_gt_TDC)\n\n##############################################################################\n# When comparing two sorters (2), we can see the matching of units between sorters.\n# Units which are not matched has -1 as unit id:\n\ncomp_TDC_HS.hungarian_match_12\n\n##############################################################################\n# or the reverse:\n\ncomp_TDC_HS.hungarian_match_21\n\n##############################################################################\n# When comparing multiple sorters (3), you can extract a :code:`SortingExtractor` object with units in agreement\n# between sorters. You can also plot a graph showing how the units are matched between the sorters.\n\nsorting_agreement = comp_multi.get_agreement_sorting(minimum_agreement_count=2)\n\nprint('Units in agreement between Klusta and Mountainsort4:', sorting_agreement.get_unit_ids())\n\nw_multi = sw.plot_multicomp_graph(comp_multi)\n\nplt.show()\n"
] | [
[
"numpy.array"
],
[
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
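The UnitLocalizationWidget row above defers the actual 'center_of_mass' estimate to `spikeinterface.toolkit.compute_unit_centers_of_mass`. As a minimal numpy sketch of the idea (illustrative only; the function name `center_of_mass` and its exact amplitude definition are assumptions, not the library's implementation): each unit's location is the amplitude-weighted average of the channel positions, using its template's per-channel peak amplitude, restricted to the strongest channels as `method_kwargs={'peak_sign': 'neg', 'num_channels': 10}` suggests.

import numpy as np

def center_of_mass(template, channel_locations, peak_sign='neg', num_channels=10):
    """template: (num_samples, num_channels); channel_locations: (num_channels, 2)."""
    # Per-channel amplitude: depth of the negative peak (or height of the positive one).
    if peak_sign == 'neg':
        amps = -template.min(axis=0)
    else:
        amps = template.max(axis=0)
    amps = np.maximum(amps, 0)
    # Keep only the strongest channels, then take the weighted mean of their positions.
    best = np.argsort(amps)[::-1][:num_channels]
    w = amps[best]
    return (channel_locations[best] * w[:, None]).sum(axis=0) / w.sum()

# Toy usage: 8 channels at 20 um pitch, a unit strongest near channel 3.
locs = np.column_stack([np.zeros(8), np.arange(8) * 20.0])
tmpl = np.zeros((90, 8))
tmpl[30, 2:5] = [-40.0, -120.0, -60.0]   # negative spike peak around channel 3
print(center_of_mass(tmpl, locs))        # y lands between 40 and 80 um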
MRD-Git/Huggingface-course | [
"7c0440584e630cb8885c2a237bc6e8213cfd5572"
] | [
"drop/multilabel_classification/loss.py"
] | [
"# https://pytorch.org/docs/stable/_modules/torch/nn/modules/loss.ht\nfrom torch.nn.modules.loss import _Loss\nfrom typing import Optional\nfrom torch import Tensor\nimport torch.nn.functional as F\n\nclass BCEWithLogitsLoss(_Loss):\n r\"\"\"This loss combines a `Sigmoid` layer and the `BCELoss` in one single\n class. This version is more numerically stable than using a plain `Sigmoid`\n followed by a `BCELoss` as, by combining the operations into one layer,\n we take advantage of the log-sum-exp trick for numerical stability.\n\n The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:\n\n .. math::\n \\ell(x, y) = L = \\{l_1,\\dots,l_N\\}^\\top, \\quad\n l_n = - w_n \\left[ y_n \\cdot \\log \\sigma(x_n)\n + (1 - y_n) \\cdot \\log (1 - \\sigma(x_n)) \\right],\n\n where :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``\n (default ``'mean'``), then\n\n .. math::\n \\ell(x, y) = \\begin{cases}\n \\operatorname{mean}(L), & \\text{if reduction} = \\text{`mean';}\\\\\n \\operatorname{sum}(L), & \\text{if reduction} = \\text{`sum'.}\n \\end{cases}\n\n This is used for measuring the error of a reconstruction in for example\n an auto-encoder. Note that the targets `t[i]` should be numbers\n between 0 and 1.\n\n It's possible to trade off recall and precision by adding weights to positive examples.\n In the case of multi-label classification the loss can be described as:\n\n .. math::\n \\ell_c(x, y) = L_c = \\{l_{1,c},\\dots,l_{N,c}\\}^\\top, \\quad\n l_{n,c} = - w_{n,c} \\left[ p_c y_{n,c} \\cdot \\log \\sigma(x_{n,c})\n + (1 - y_{n,c}) \\cdot \\log (1 - \\sigma(x_{n,c})) \\right],\n\n where :math:`c` is the class number (:math:`c > 1` for multi-label binary classification,\n :math:`c = 1` for single-label binary classification),\n :math:`n` is the number of the sample in the batch and\n :math:`p_c` is the weight of the positive answer for the class :math:`c`.\n\n :math:`p_c > 1` increases the recall, :math:`p_c < 1` increases the precision.\n\n For example, if a dataset contains 100 positive and 300 negative examples of a single class,\n then `pos_weight` for the class should be equal to :math:`\\frac{300}{100}=3`.\n The loss would act as if the dataset contains :math:`3\\times 100=300` positive examples.\n\n Examples::\n\n >>> target = torch.ones([10, 64], dtype=torch.float32) # 64 classes, batch size = 10\n >>> output = torch.full([10, 64], 1.5) # A prediction (logit)\n >>> pos_weight = torch.ones([64]) # All weights are equal to 1\n >>> criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)\n >>> criterion(output, target) # -log(sigmoid(1.5))\n tensor(0.2014)\n\n Args:\n weight (Tensor, optional): a manual rescaling weight given to the loss\n of each batch element. If given, has to be a Tensor of size `nbatch`.\n size_average (bool, optional): Deprecated (see :attr:`reduction`). By default,\n the losses are averaged over each loss element in the batch. Note that for\n some losses, there are multiple elements per sample. If the field :attr:`size_average`\n is set to ``False``, the losses are instead summed for each minibatch. Ignored\n when :attr:`reduce` is ``False``. Default: ``True``\n reduce (bool, optional): Deprecated (see :attr:`reduction`). By default, the\n losses are averaged or summed over observations for each minibatch depending\n on :attr:`size_average`. When :attr:`reduce` is ``False``, returns a loss per\n batch element instead and ignores :attr:`size_average`. 
Default: ``True``\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,\n ``'mean'``: the sum of the output will be divided by the number of\n elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``\n pos_weight (Tensor, optional): a weight of positive examples.\n Must be a vector with length equal to the number of classes.\n\n Shape:\n - Input: :math:`(*)`, where :math:`*` means any number of dimensions.\n - Target: :math:`(*)`, same shape as the input.\n - Output: scalar. If :attr:`reduction` is ``'none'``, then :math:`(*)`, same\n shape as input.\n\n Examples::\n\n >>> loss = nn.BCEWithLogitsLoss()\n >>> input = torch.randn(3, requires_grad=True)\n >>> target = torch.empty(3).random_(2)\n >>> output = loss(input, target)\n >>> output.backward()\n \"\"\"\n def __init__(self, weight: Optional[Tensor] = None, size_average=None, reduce=None, reduction: str = 'mean',\n pos_weight: Optional[Tensor] = None) -> None:\n super(BCEWithLogitsLoss, self).__init__(size_average, reduce, reduction)\n self.register_buffer('weight', weight)\n self.register_buffer('pos_weight', pos_weight)\n self.weight: Optional[Tensor]\n self.pos_weight: Optional[Tensor]\n\n def forward(self, inputs, target):\n return F.binary_cross_entropy_with_logits(inputs, target,\n self.weight)"
] | [
[
"torch.nn.functional.binary_cross_entropy_with_logits"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
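The docstring in the row above gives the per-element loss formula and the pos_weight semantics. A hedged sketch of the log-sum-exp-stable form it alludes to, in plain numpy so the numbers can be checked against `torch.nn.BCEWithLogitsLoss` (the stable rearrangement below is a standard identity, assumed rather than quoted from the row):

import numpy as np

def bce_with_logits(x, y, pos_weight=1.0, reduction='mean'):
    # Stable form: l = (1 - y) * x + (1 + (p - 1) * y) * (log(1 + exp(-|x|)) + max(-x, 0)),
    # which equals -[p * y * log(sigmoid(x)) + (1 - y) * log(1 - sigmoid(x))]
    # without ever exponentiating a large positive number.
    log_weight = 1.0 + (pos_weight - 1.0) * y
    loss = (1.0 - y) * x + log_weight * (np.log1p(np.exp(-np.abs(x))) + np.maximum(-x, 0.0))
    if reduction == 'mean':
        return loss.mean()
    if reduction == 'sum':
        return loss.sum()
    return loss

# Reproduces the docstring example: 64 classes, logits all 1.5, targets all 1.
x = np.full((10, 64), 1.5)
y = np.ones((10, 64))
print(bce_with_logits(x, y))  # ~0.2014 == -log(sigmoid(1.5))

Setting pos_weight=3.0 here scales only the positive-target terms, which is exactly the 100-positive/300-negative rebalancing example from the docstring.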
egbQuantum/strawberryfields | [
"674e4fe2de5e5dd791a77f1cd219009120dcbbbf",
"674e4fe2de5e5dd791a77f1cd219009120dcbbbf"
] | [
"strawberryfields/backends/states.py",
"examples/gaussian_cloning.py"
] | [
"# Copyright 2019 Xanadu Quantum Technologies Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"\n.. _state_class:\n\nQuantum states API\n========================================================\n\n**Module name:** :mod:`strawberryfields.backends.states`\n\n.. currentmodule:: strawberryfields.backends.states\n\nThis module provides classes which represent the quantum state\nreturned by a simulator backend via :class:`.Engine`.\n\n\nBase quantum state\n-------------------------------\n\nAn abstract base class for the representation of quantum states. This class should not be instantiated on its\nown, instead all states will be represented by one of the inheriting subclasses.\n\nThis class contains all methods that should be supported by all\ninheriting classes.\n\n.. note::\n In the following, keyword arguments are denoted ``**kwargs``, and allow additional\n options to be passed to the underlying State class - these are documented where\n available. For more details on relevant keyword arguments, please\n consult the backend documentation directly.\n\n.. currentmodule:: strawberryfields.backends.states.BaseState\n\n.. autosummary::\n data\n hbar\n is_pure\n num_modes\n mode_names\n mode_indices\n reduced_dm\n fock_prob\n mean_photon\n fidelity\n fidelity_vacuum\n fidelity_coherent\n wigner\n quad_expectation\n poly_quad_expectation\n\n\nBase Gaussian state\n-------------------------------\n\nClass for the representation of quantum states using the Gaussian formalism.\nThis class extends the class :class:`~.BaseState` with additional methods\nunique to Gaussian states.\n\nNote that backends using the Gaussian state representation may extend this class with\nadditional methods particular to the backend, for example :class:`~.GaussianState`\nin the :ref:`gaussian_backend`.\n\n.. currentmodule:: strawberryfields.backends.states.BaseGaussianState\n\n.. autosummary::\n means\n cov\n reduced_gaussian\n is_coherent\n is_squeezed\n displacement\n squeezing\n\n\nBase Fock state\n-------------------------------\n\nClass for the representation of quantum states in the Fock basis.\nThis class extends the class :class:`~.BaseState` with additional methods\nunique to states in the Fock-basis representation.\n\nNote that backends using Fock-basis representation may extend this class with\nadditional methods particular to the backend, for example :class:`~.FockStateTF`\nin the :ref:`Tensorflow_backend`.\n\n.. currentmodule:: strawberryfields.backends.states.BaseFockState\n\n.. autosummary::\n cutoff_dim\n ket\n dm\n trace\n all_fock_probs\n\n.. 
currentmodule:: strawberryfields.backends.states\n\n\nCode details\n~~~~~~~~~~~~\n\n\"\"\"\nimport abc\nimport string\nfrom itertools import chain\nfrom copy import copy\n\nimport numpy as np\nfrom scipy.linalg import block_diag\nfrom scipy.stats import multivariate_normal\nfrom scipy.special import factorial\n\nimport strawberryfields as sf\nfrom .shared_ops import rotation_matrix as _R\nfrom .shared_ops import changebasis\n\nindices = string.ascii_lowercase\n\nclass BaseState(abc.ABC):\n r\"\"\"Abstract base class for the representation of quantum states.\"\"\"\n EQ_TOLERANCE = 1e-10\n\n def __init__(self, num_modes, mode_names=None):\n self._modes = num_modes\n self._hbar = sf.hbar # always use the global frontend hbar value for state objects\n self._data = None\n self._pure = None\n\n if mode_names is None:\n self._modemap = {i:\"mode {}\".format(i) for i in range(num_modes)}\n else:\n self._modemap = {i:'{}'.format(j) for i, j in zip(range(num_modes), mode_names)}\n\n self._str = \"<BaseState: num_modes={}, pure={}, hbar={}>\".format(\n self.num_modes, self._pure, self._hbar)\n\n def __str__(self):\n return self._str\n\n def __repr__(self):\n return self._str\n\n @property\n def data(self):\n r\"\"\"Returns the underlying numerical (or symbolic) representation of the state.\n The form of this data differs for different backends.\"\"\"\n return self._data\n\n @property\n def hbar(self):\n r\"\"\"Returns the value of :math:`\\hbar` used in the generation of the state.\n\n The value of :math:`\\hbar` is a convention chosen in the definition of\n :math:`\\x` and :math:`\\p`. See :ref:`opcon` for more details.\n\n Returns:\n float: :math:`\\hbar` value.\n \"\"\"\n return self._hbar\n\n @property\n def is_pure(self):\n r\"\"\"Checks whether the state is a pure state.\n\n Returns:\n bool: True if and only if the state is pure.\n \"\"\"\n return self._pure\n\n @property\n def num_modes(self):\n r\"\"\"Gets the number of modes that the state represents.\n\n Returns:\n int: the number of modes in the state\n \"\"\"\n return self._modes\n\n @property\n def mode_names(self):\n r\"\"\"Returns a dictionary mapping the mode index to mode names.\n\n The mode names are determined from the initialization argument\n ``mode_names``. If these were not supplied, the names are generated automatically based\n on the mode indices.\n\n Returns:\n dict: dictionary of the form ``{i:\"mode name\",...}``\n \"\"\"\n return self._modemap\n\n @property\n def mode_indices(self):\n r\"\"\"Returns a dictionary mapping the mode names to mode indices.\n\n The mode names are determined from the initialization argument\n ``mode_names``. 
If these were not supplied, the names are generated automatically based\n on the mode indices.\n\n Returns:\n dict: dictionary of the form ``{\"mode name\":i,...}``\n \"\"\"\n return {v: k for k, v in self._modemap.items()}\n\n @abc.abstractmethod\n def reduced_dm(self, modes, **kwargs):\n r\"\"\"Returns a reduced density matrix in the Fock basis.\n\n Args:\n modes (int or Sequence[int]): specifies the mode(s) to return the reduced density matrix for.\n **kwargs:\n\n * **cutoff** (*int*): (default 10) specifies where to truncate the returned density matrix.\n Note that the cutoff argument only applies for Gaussian representation;\n states represented in the Fock basis will use their own internal cutoff dimension.\n\n Returns:\n array: the reduced density matrix for the specified modes\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fock_prob(self, n, **kwargs):\n r\"\"\"Probability of a particular Fock basis state.\n\n Computes the probability :math:`|\\braket{\\vec{n}|\\psi}|^2` of measuring\n the given multi-mode Fock state based on the state :math:`\\ket{\\psi}`.\n\n .. warning::\n\n Computing the Fock probabilities of states has exponential scaling\n in the Gaussian representation (for example states output by a\n Gaussian backend as a :class:`~.BaseGaussianState`).\n This shouldn't affect small-scale problems, where only a few Fock\n basis state probabilities need to be calculated, but will become\n evident in larger scale problems.\n\n Args:\n n (Sequence[int]): the Fock state :math:`\\ket{\\vec{n}}` that we want to measure the probability of\n **kwargs:\n\n * **cutoff** (*int*): (default 10) specifies the fock basis truncation when calculating\n of the fock basis probabilities.\n Note that the cutoff argument only applies for Gaussian representation;\n states represented in the Fock basis will use their own internal cutoff dimension.\n\n Returns:\n float: measurement probability\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def mean_photon(self, mode, **kwargs):\n \"\"\"Returns the mean photon number of a particular mode.\n\n Args:\n mode (int): specifies the mode\n **kwargs:\n\n * **cutoff** (*int*): (default 10) Fock basis trunction for calculation of\n mean photon number.\n Note that the cutoff argument only applies for Gaussian representation;\n states represented in the Fock basis will use their own internal cutoff dimension.\n\n Returns:\n tuple: the mean photon number and variance\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity(self, other_state, mode, **kwargs):\n r\"\"\"Fidelity of the reduced state in the specified mode with a user supplied state.\n Note that this method only supports single-mode states.\n\n Args:\n other_state: a pure state vector array represented in the Fock basis (for Fock backends)\n or a Sequence ``(mu, cov)`` containing the means and covariance matrix (for Gaussian backends)\n\n Returns:\n The fidelity of the circuit state with ``other_state``.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_vacuum(self, **kwargs):\n \"\"\"The fidelity of the state with the vacuum state.\n\n Returns:\n float: the fidelity of the state with the vacuum\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_coherent(self, alpha_list, **kwargs):\n r\"\"\"The fidelity of the state with a product of coherent states.\n\n The fidelity is defined by\n\n .. 
math:: \\bra{\\vec{\\alpha}}\\rho\\ket{\\vec{\\alpha}}\n\n Args:\n alpha_list (Sequence[complex]): list of coherent state parameters, one for each mode\n\n Returns:\n float: the fidelity value\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def wigner(self, mode, xvec, pvec):\n r\"\"\"Calculates the discretized Wigner function of the specified mode.\n\n Args:\n mode (int): the mode to calculate the Wigner function for\n xvec (array): array of discretized :math:`x` quadrature values\n pvec (array): array of discretized :math:`p` quadrature values\n\n Returns:\n array: 2D array of size [len(xvec), len(pvec)], containing reduced Wigner function\n values for specified x and p values.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def quad_expectation(self, mode, phi=0, **kwargs):\n r\"\"\"The :math:`\\x_{\\phi}` operator expectation values and variance for the specified mode.\n\n The :math:`\\x_{\\phi}` operator is defined as follows,\n\n .. math:: \\x_{\\phi} = \\cos\\phi~\\x + \\sin\\phi~\\p\n\n with corresponding expectation value\n\n .. math:: \\bar{x_{\\phi}}=\\langle x_{\\phi}\\rangle = \\text{Tr}(\\x_{\\phi}\\rho_{mode})\n\n and variance\n\n .. math:: \\Delta x_{\\phi}^2 = \\langle x_{\\phi}^2\\rangle - \\braket{x_{\\phi}}^2\n\n Args:\n mode (int): the requested mode\n phi (float): quadrature angle, clockwise from the positive :math:`x` axis.\n\n * :math:`\\phi=0` corresponds to the :math:`x` expectation and variance (default)\n * :math:`\\phi=\\pi/2` corresponds to the :math:`p` expectation and variance\n\n Returns:\n tuple (float, float): expectation value and variance\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs):\n r\"\"\"The multi-mode expectation values and variance of arbitrary 2nd order polynomials\n of quadrature operators.\n\n An arbitrary 2nd order polynomial of quadrature operators over $N$ modes can always\n be written in the following form:\n\n .. math:: P(\\mathbf{r}) = \\mathbf{r}^T A\\mathbf{r} + \\mathbf{r}^T \\mathbf{d} + k I\n\n where:\n\n * :math:`A\\in\\mathbb{R}^{2N\\times 2N}` is a symmetric matrix\n representing the quadratic coefficients,\n * :math:`\\mathbf{d}\\in\\mathbb{R}^{2N}` is a real vector representing\n the linear coefficients,\n * :math:`k\\in\\mathbb{R}` represents the constant term, and\n * :math:`\\mathbf{r} = (\\x_1,\\dots,\\x_N,\\p_1,\\dots,\\p_N)` is the vector\n of quadrature operators in :math:`xp`-ordering.\n\n This method returns the expectation value of this second-order polynomial,\n\n .. math:: \\langle P(\\mathbf{r})\\rangle,\n\n as well as the variance\n\n .. math:: \\Delta P(\\mathbf{r})^2 = \\braket{P(\\mathbf{r})^2} - \\braket{P(\\mathbf{r})}^2\n\n Args:\n A (array): a real symmetric 2Nx2N NumPy array, representing the quadratic\n coefficients of the second order quadrature polynomial.\n d (array): a real length-2N NumPy array, representing the linear\n coefficients of the second order quadrature polynomial. Defaults to the zero vector.\n k (float): the constant term. Default 0.\n phi (float): quadrature angle, clockwise from the positive :math:`x` axis. 
If provided,\n the vectori of quadrature operators :math:`\\mathbf{r}` is first rotated\n by angle :math:`\\phi` in the phase space.\n\n\n Returns:\n tuple (float, float): expectation value and variance\n \"\"\"\n raise NotImplementedError\n\n\nclass BaseFockState(BaseState):\n r\"\"\"Class for the representation of quantum states in the Fock basis.\n\n Args:\n state_data (array): the state representation in the Fock basis\n num_modes (int): the number of modes in the state\n pure (bool): True if the state is a pure state, false if the state is mixed\n cutoff_dim (int): the Fock basis truncation size\n mode_names (Sequence): (optional) this argument contains a list providing mode names\n for each mode in the state\n \"\"\"\n\n def __init__(self, state_data, num_modes, pure, cutoff_dim, mode_names=None):\n # pylint: disable=too-many-arguments\n\n super().__init__(num_modes, mode_names)\n\n self._data = state_data\n self._cutoff = cutoff_dim\n self._pure = pure\n self._basis = 'fock'\n\n self._str = \"<FockState: num_modes={}, cutoff={}, pure={}, hbar={}>\".format(\n self.num_modes, self._cutoff, self._pure, self._hbar)\n\n def __eq__(self, other):\n \"\"\"Equality operator for BaseFockState.\n\n Returns True if other BaseFockState is close to self.\n This is done by comparing the dm attribute - if within\n the EQ_TOLERANCE, True is returned.\n\n Args:\n other (BaseFockState): BaseFockState to compare against.\n \"\"\"\n if not isinstance(other, type(self)):\n return False\n\n if self.num_modes != other.num_modes:\n return False\n\n if self.data.shape != other.data.shape:\n return False\n\n if np.allclose(self.dm(), other.dm(), atol=self.EQ_TOLERANCE, rtol=0):\n return True\n\n return False\n\n @property\n def cutoff_dim(self):\n r\"\"\"The numerical truncation of the Fock space used by the underlying state.\n Note that a cutoff of D corresponds to the Fock states :math:`\\{|0\\rangle,\\dots,|D-1\\rangle\\}`\n\n Returns:\n int: the cutoff dimension\n \"\"\"\n return self._cutoff\n\n def ket(self, **kwargs):\n r\"\"\"The numerical state vector for the quantum state.\n Note that if the state is mixed, this method returns None.\n\n Returns:\n array/None: the numerical state vector. 
Returns None if the state is mixed.\n \"\"\"\n # pylint: disable=unused-argument\n if self._pure:\n return self.data\n\n return None # pragma: no cover\n\n def dm(self, **kwargs):\n r\"\"\"The numerical density matrix for the quantum state.\n\n Returns:\n array: the numerical density matrix in the Fock basis\n \"\"\"\n # pylint: disable=unused-argument\n if self._pure:\n left_str = [indices[i] for i in range(0, 2 * self._modes, 2)]\n right_str = [indices[i] for i in range(1, 2 * self._modes, 2)]\n out_str = [indices[: 2 * self._modes]]\n einstr = ''.join(left_str + [','] + right_str + ['->'] + out_str)\n rho = np.einsum(einstr, self.ket(), self.ket().conj())\n return rho\n\n return self.data\n\n def trace(self, **kwargs):\n r\"\"\"Trace of the density operator corresponding to the state.\n\n For pure states the trace corresponds to the squared norm of the ket vector.\n\n For physical states this should always be 1, any deviations from this value are due\n to numerical errors and Hilbert space truncation artefacts.\n\n Returns:\n float: trace of the state\n \"\"\"\n # pylint: disable=unused-argument\n if self.is_pure:\n return np.vdot(self.ket(), self.ket()).real # <s|s>\n\n # need some extra steps to trace over multimode matrices\n eqn_indices = [[indices[idx]] * 2 for idx in range(self._modes)] #doubled indices [['i','i'],['j','j'], ... ]\n eqn = \"\".join(chain.from_iterable(eqn_indices)) # flatten indices into a single string 'iijj...'\n return np.einsum(eqn, self.dm()).real\n\n def all_fock_probs(self, **kwargs):\n r\"\"\"Probabilities of all possible Fock basis states for the current circuit state.\n\n For example, in the case of 3 modes, this method allows the Fock state probability\n :math:`|\\braketD{0,2,3}{\\psi}|^2` to be returned via\n\n .. 
code-block:: python\n\n probs = state.all_fock_probs()\n probs[0,2,3]\n\n Returns:\n array: array of dimension :math:`\\underbrace{D\\times D\\times D\\cdots\\times D}_{\\text{num modes}}`\n containing the Fock state probabilities, where :math:`D` is the Fock basis cutoff truncation\n \"\"\"\n # pylint: disable=unused-argument\n if self._pure:\n s = np.ravel(self.ket()) # into 1D array\n return np.reshape((s * s.conj()).real, [self._cutoff]*self._modes)\n\n s = self.dm()\n num_axes = len(s.shape)\n evens = [k for k in range(0, num_axes, 2)]\n odds = [k for k in range(1, num_axes, 2)]\n flat_size = np.prod([s.shape[k] for k in range(0, num_axes, 2)])\n transpose_list = evens + odds\n probs = np.diag(np.reshape(np.transpose(s, transpose_list), [flat_size, flat_size])).real\n\n return np.reshape(probs, [self._cutoff]*self._modes)\n\n #=====================================================\n # the following methods are overwritten from BaseState\n\n def reduced_dm(self, modes, **kwargs):\n # pylint: disable=unused-argument\n if modes == list(range(self._modes)):\n # reduced state is full state\n return self.dm() # pragma: no cover\n\n if isinstance(modes, int):\n modes = [modes]\n if modes != sorted(modes):\n raise ValueError(\"The specified modes cannot be duplicated.\")\n\n if len(modes) > self._modes:\n raise ValueError(\"The number of specified modes cannot \"\n \"be larger than the number of subsystems.\")\n\n # reduce rho down to specified subsystems\n keep_indices = indices[: 2 * len(modes)]\n trace_indices = indices[2 * len(modes) : len(modes) + self._modes]\n\n ind = [i * 2 for i in trace_indices]\n ctr = 0\n\n for m in range(self._modes):\n if m in modes:\n ind.insert(m, keep_indices[2 * ctr : 2 * (ctr + 1)])\n ctr += 1\n\n indStr = ''.join(ind) + '->' + keep_indices\n return np.einsum(indStr, self.dm())\n\n def fock_prob(self, n, **kwargs):\n # pylint: disable=unused-argument\n if len(n) != self._modes:\n raise ValueError(\"List length should be equal to number of modes\")\n\n elif max(n) >= self._cutoff:\n raise ValueError(\"Can't get distribution beyond truncation level\")\n\n if self._pure:\n return np.abs(self.ket()[tuple(n)])**2\n\n return self.dm()[tuple([n[i//2] for i in range(len(n)*2)])].real\n\n def mean_photon(self, mode, **kwargs):\n # pylint: disable=unused-argument\n n = np.arange(self._cutoff)\n probs = np.diagonal(self.reduced_dm(mode))\n mean = np.sum(n*probs).real\n var = np.sum(n**2*probs).real - mean**2\n return mean, var\n\n def fidelity(self, other_state, mode, **kwargs):\n # pylint: disable=unused-argument\n max_indices = len(indices) // 2\n\n if self.num_modes > max_indices:\n raise Exception(\"fidelity method can only support up to {} modes\".format(max_indices))\n\n left_indices = indices[:mode]\n eqn_left = \"\".join([i*2 for i in left_indices])\n reduced_dm_indices = indices[mode:mode + 2]\n right_indices = indices[mode + 2:self._modes + 1]\n eqn_right = \"\".join([i*2 for i in right_indices])\n eqn = \"\".join([eqn_left, reduced_dm_indices, eqn_right]) + \"->\" + reduced_dm_indices\n rho_reduced = np.einsum(eqn, self.dm())\n\n return np.dot(np.conj(other_state), np.dot(rho_reduced, other_state)).real\n\n def fidelity_vacuum(self, **kwargs):\n # pylint: disable=unused-argument\n alpha = np.zeros(self._modes)\n return self.fidelity_coherent(alpha)\n\n def fidelity_coherent(self, alpha_list, **kwargs):\n # pylint: disable=too-many-locals,unused-argument\n if self.is_pure:\n mode_size = 1\n s = self.ket()\n else:\n mode_size = 2\n s = self.dm()\n\n if not 
hasattr(alpha_list, \"__len__\"):\n alpha_list = [alpha_list] # pragma: no cover\n\n if len(alpha_list) != self._modes:\n raise ValueError(\"The number of alpha values must match the number of modes.\")\n\n coh = lambda a, dim: np.array(\n [np.exp(-0.5 * np.abs(a) ** 2) * (a) ** n / np.sqrt(factorial(n)) for n in range(dim)]\n )\n\n if self._modes == 1:\n multi_cohs_vec = coh(alpha_list[0], self._cutoff)\n else:\n multi_cohs_list = [coh(alpha_list[idx], dim) for idx, dim in enumerate(s.shape[::mode_size])]\n eqn = \",\".join(indices[:self._modes]) + \"->\" + indices[:self._modes]\n multi_cohs_vec = np.einsum(eqn, *multi_cohs_list) # tensor product of specified coherent states\n\n if self.is_pure:\n ovlap = np.vdot(multi_cohs_vec, s)\n return np.abs(ovlap) ** 2\n\n bra_indices = indices[:2 * self._modes:2]\n ket_indices = indices[1:2 * self._modes:2]\n new_eqn_lhs = \",\".join([bra_indices, ket_indices])\n new_eqn_rhs = \"\".join(bra_indices[idx] + ket_indices[idx] for idx in range(self._modes))\n outer_prod_eqn = new_eqn_lhs + \"->\" + new_eqn_rhs\n multi_coh_matrix = np.einsum(outer_prod_eqn, multi_cohs_vec, np.conj(multi_cohs_vec))\n\n return np.vdot(s, multi_coh_matrix).real\n\n def wigner(self, mode, xvec, pvec):\n r\"\"\"Calculates the discretized Wigner function of the specified mode.\n\n .. note::\n\n This code is a modified version of the 'iterative' method of the\n `wigner function provided in QuTiP <http://qutip.org/docs/4.0.2/apidoc/functions.html?highlight=wigner#qutip.wigner.wigner>`_,\n which is released under the BSD license, with the following\n copyright notice:\n\n Copyright (C) 2011 and later, P.D. Nation, J.R. Johansson,\n A.J.G. Pitchford, C. Granade, and A.L. Grimsmo. All rights reserved.\n\n Args:\n mode (int): the mode to calculate the Wigner function for\n xvec (array): array of discretized :math:`x` quadrature values\n pvec (array): array of discretized :math:`p` quadrature values\n\n Returns:\n array: 2D array of size [len(xvec), len(pvec)], containing reduced Wigner function\n values for specified x and p values.\n \"\"\"\n rho = self.reduced_dm(mode)\n Q, P = np.meshgrid(xvec, pvec)\n A = (Q + P * 1.0j) / (2*np.sqrt(self._hbar/2))\n\n Wlist = np.array([np.zeros(np.shape(A), dtype=complex) for k in range(self._cutoff)])\n\n # Wigner function for |0><0|\n Wlist[0] = np.exp(-2.0 * np.abs(A)**2) / np.pi\n\n # W = rho(0,0)W(|0><0|)\n W = np.real(rho[0, 0]) * np.real(Wlist[0])\n\n for n in range(1, self._cutoff):\n Wlist[n] = (2.0 * A * Wlist[n - 1]) / np.sqrt(n)\n W += 2 * np.real(rho[0, n] * Wlist[n])\n\n for m in range(1, self._cutoff):\n temp = copy(Wlist[m])\n # Wlist[m] = Wigner function for |m><m|\n Wlist[m] = (2 * np.conj(A) * temp - np.sqrt(m)\n * Wlist[m - 1]) / np.sqrt(m)\n\n # W += rho(m,m)W(|m><m|)\n W += np.real(rho[m, m] * Wlist[m])\n\n for n in range(m + 1, self._cutoff):\n temp2 = (2 * A * Wlist[n - 1] - np.sqrt(m) * temp) / np.sqrt(n)\n temp = copy(Wlist[n])\n # Wlist[n] = Wigner function for |m><n|\n Wlist[n] = temp2\n\n # W += rho(m,n)W(|m><n|) + rho(n,m)W(|n><m|)\n W += 2 * np.real(rho[m, n] * Wlist[n])\n\n return W / (self._hbar)\n\n def quad_expectation(self, mode, phi=0, **kwargs):\n a = np.diag(np.sqrt(np.arange(1, self._cutoff+5)), 1)\n x = np.sqrt(self._hbar/2) * (a + a.T)\n p = -1j * np.sqrt(self._hbar/2) * (a - a.T)\n\n xphi = np.cos(phi)*x + np.sin(phi)*p\n xphisq = np.dot(xphi, xphi)\n\n # truncate down\n xphi = xphi[:self._cutoff, :self._cutoff]\n xphisq = xphisq[:self._cutoff, :self._cutoff]\n\n rho = self.reduced_dm(mode)\n\n mean = 
np.trace(np.dot(xphi, rho)).real\n var = np.trace(np.dot(xphisq, rho)).real - mean**2\n\n return mean, var\n\n def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs):\n # pylint: disable=too-many-branches\n\n if A is None:\n A = np.zeros([2*self._modes, 2*self._modes])\n\n if A.shape != (2*self._modes, 2*self._modes):\n raise ValueError(\"Matrix of quadratic coefficients A must be of size 2Nx2N.\")\n\n if not np.allclose(A.T, A):\n raise ValueError(\"Matrix of quadratic coefficients A must be symmetric.\")\n\n if d is None:\n linear_coeff = np.zeros([2*self._modes])\n else:\n linear_coeff = d.copy()\n linear_coeff[self._modes:] = -d[self._modes:]\n\n if linear_coeff.shape != (2*self._modes,):\n raise ValueError(\"Vector of linear coefficients d must be of length 2N.\")\n\n # expand the cutoff dimension in approximating the x and p\n # operators in the Fock basis, to reduce numerical inaccuracy.\n worksize = 1\n dim = self._cutoff + worksize\n\n # construct the x and p operators\n a = np.diag(np.sqrt(np.arange(1, dim)), 1)\n x_ = np.sqrt(self._hbar/2) * (a + a.T)\n p_ = -1j * np.sqrt(self._hbar/2) * (a - a.T)\n\n if phi != 0:\n # rotate the quadrature operators\n x = np.cos(phi)*x_ - np.sin(phi)*p_\n p = np.sin(phi)*x_ + np.cos(phi)*p_\n else:\n x = x_\n p = p_\n\n def expand_dims(op, n, modes):\n \"\"\"Expand quadrature operator to act on nth mode\"\"\"\n I = np.identity(dim)\n allowed_indices = zip(indices[:2*modes:2], indices[1:2*modes:2])\n ind = ','.join(a+b for a, b in allowed_indices)\n ops = [I]*n + [op] + [I]*(modes-n-1)\n # the einsum 'ij,kl,mn->ijklmn' (for 3 modes)\n return np.einsum(ind, *ops)\n\n # determine modes with quadratic expectation values\n nonzero = np.concatenate([np.mod(A.nonzero()[0], self._modes), np.mod(linear_coeff.nonzero()[0], self._modes)])\n ex_modes = list(set(nonzero))\n num_modes = len(ex_modes)\n\n if not ex_modes:\n # only a constant term was provided\n return k, 0.\n\n # There are non-zero elements of A and/or d\n # therefore there are quadratic and/or linear terms.\n # find the reduced density matrix\n rho = self.reduced_dm(modes=ex_modes)\n\n # generate vector of quadrature operators\n # this array will have shape [2*num_modes] + [dim]*(2*num_modes)\n r = np.empty([2*num_modes] + [dim]*(2*num_modes), dtype=np.complex128)\n for n in range(num_modes):\n r[n] = expand_dims(x, n, num_modes)\n r[num_modes+n] = expand_dims(p, n, num_modes)\n\n # reduce the size of A so that we only consider modes\n # which we need to calculate the expectation value for\n rows = ex_modes + [i+self._modes for i in ex_modes]\n quad_coeffs = A[:, rows][rows]\n quad_coeffs[num_modes:, :num_modes] = -quad_coeffs[num_modes:, :num_modes]\n quad_coeffs[:num_modes, num_modes:] = -quad_coeffs[:num_modes, num_modes:]\n\n # Compute the polynomial\n #\n # For 3 modes, this gives the einsum (with brackets denoting modes):\n # 'a(bc)(de)(fg),a(ch)(ei)(gj)->(bh)(di)(fj)' applied to r, A@r\n #\n # a corresponds to the index in the vector of quadrature operators\n # r = (x_1,...,x_n,p_1,...,p_n), and the remaining indices ijklmn\n # are the elements of the operator acting on a 3 mode density matrix.\n #\n # So, in effect, matrix of quadratic coefficients A acts only on index a,\n # this index is then summed, and then each mode of r, A@r undergoes\n # matrix multiplication\n ind1 = indices[:2*num_modes+1]\n ind2 = ind1[0] + ''.join([str(i)+str(j) for i, j in zip(ind1[2::2], indices[2*num_modes+1:3*num_modes+1])])\n ind3 = ''.join([str(i)+str(j) for i, j in zip(ind1[1::2], 
ind2[2::2])])\n ind = \"{},{}->{}\".format(ind1, ind2, ind3)\n\n if np.allclose(quad_coeffs, 0.):\n poly_op = np.zeros([dim]*(2*num_modes), dtype=np.complex128)\n else:\n # Einsum above applied to to r,Ar\n # This einsum sums over all quadrature operators, and also applies matrix\n # multiplication between the same mode of each operator\n poly_op = np.einsum(ind, r, np.tensordot(quad_coeffs, r, axes=1)).conj()\n\n # add linear term\n rows = np.flip(np.array(rows).reshape([2, -1]), axis=1).flatten()\n poly_op += r.T @ linear_coeff[rows]\n\n # add constant term\n if k != 0:\n poly_op += k*expand_dims(np.eye(dim), 0, num_modes)\n\n # calculate Op^2\n ind = \"{},{}->{}\".format(ind1[1:], ind2[1:], ind3)\n poly_op_sq = np.einsum(ind, poly_op, poly_op)\n\n # truncate down\n sl = tuple([slice(0, self._cutoff)]*(2*num_modes))\n poly_op = poly_op[sl]\n poly_op_sq = poly_op_sq[sl]\n\n ind1 = ind1[:-1]\n ind2 = ''.join([str(j)+str(i) for i, j in zip(ind1[::2], ind1[1::2])])\n ind = \"{},{}\".format(ind1, ind2)\n\n # calculate expectation value, Tr(Op @ rho)\n # For 3 modes, this gives the einsum '(ab)(cd)(ef),(ba)(dc)(fe)->'\n mean = np.einsum(ind, poly_op, rho).real\n\n # calculate variance Tr(Op^2 @ rho) - Tr(Op @ rho)^2\n var = np.einsum(ind, poly_op_sq, rho).real - mean**2\n\n return mean, var\n\n\nclass BaseGaussianState(BaseState):\n r\"\"\"Class for the representation of quantum states using the Gaussian formalism.\n\n Note that this class uses the Gaussian representation convention\n\n .. math:: \\bar{\\mathbf{r}} = (\\bar{x}_1,\\bar{x}_2,\\dots,\\bar{x}_N,\\bar{p}_1,\\dots,\\bar{p}_N)\n\n Args:\n state_data (tuple(mu, cov)): A tuple containing the vector of means array ``mu`` and the\n covariance matrix array ``cov``, in terms of the complex displacement.\n num_modes (int): the number of modes in the state\n pure (bool): True if the state is a pure state, false if the state is mixed\n mode_names (Sequence): (optional) this argument contains a list providing mode names\n for each mode in the state\n \"\"\"\n def __init__(self, state_data, num_modes, mode_names=None):\n super().__init__(num_modes, mode_names)\n\n self._data = state_data\n\n # vector of means and covariance matrix, using frontend x,p scaling\n self._mu = self._data[0] * np.sqrt(self._hbar/2)\n self._cov = self._data[1] * (self._hbar/2)\n # complex displacements of the Gaussian state\n self._alpha = self._mu[:self._modes] + 1j*self._mu[self._modes:]\n self._alpha /= np.sqrt(2*self._hbar)\n\n self._pure = np.abs(np.linalg.det(self._cov) - (self._hbar/2)**(2*self._modes)) < self.EQ_TOLERANCE\n\n self._basis = 'gaussian'\n self._str = \"<GaussianState: num_modes={}, pure={}, hbar={}>\".format(\n self.num_modes, self._pure, self._hbar)\n\n def __eq__(self, other):\n \"\"\"Equality operator for BaseGaussianState.\n\n Returns True if other BaseGaussianState is close to self.\n This is done by comparing the means vector and cov matrix.\n If both are within the EQ_TOLERANCE, True is returned.\n\n Args:\n other (BaseGaussianState): BaseGaussianState to compare against.\n \"\"\"\n #pylint: disable=protected-access\n if not isinstance(other, type(self)):\n return False\n\n if self.num_modes != other.num_modes:\n return False\n\n if np.allclose(self._mu, other._mu, atol=self.EQ_TOLERANCE, rtol=0) and \\\n np.allclose(self._cov, other._cov, atol=self.EQ_TOLERANCE, rtol=0):\n return True\n\n return False\n\n def means(self):\n r\"\"\"The vector of means describing the Gaussian state.\n\n For a :math:`N` mode state, this has the form\n\n .. 
math::\n \\bar{\\mathbf{r}} = \\left(\\bar{x}_0,\\dots,\\bar{x}_{N-1},\\bar{p}_0,\\dots,\\bar{p}_{N-1}\\right)\n\n where :math:`\\bar{x}_i` and :math:`\\bar{p}_i` refer to the mean\n position and momentum quadrature of mode :math:`i` respectively.\n\n Returns:\n array: a length :math:`2N` array containing the vector of means.\n \"\"\"\n return self._mu\n\n def cov(self):\n r\"\"\"The covariance matrix describing the Gaussian state.\n\n The diagonal elements of the covariance matrix correspond to the\n variance in the position and momentum quadratures:\n\n .. math::\n \\mathbf{V}_{ii} = \\begin{cases}\n (\\Delta x_i)^2, & 0\\leq i\\leq N-1\\\\\n (\\Delta p_{i-N})^2, & N\\leq i\\leq 2(N-1)\n \\end{cases}\n\n where :math:`\\Delta x_i` and :math:`\\Delta p_i` refer to the\n position and momentum quadrature variance of mode :math:`i` respectively.\n\n Note that if the covariance matrix is purely diagonal, then this\n corresponds to squeezing :math:`z=re^{i\\phi}` where :math:`\\phi=0`,\n and :math:`\\Delta x_i = e^{-2r}`, :math:`\\Delta p_i = e^{2r}`.\n\n Returns:\n array: the :math:`2N\\times 2N` covariance matrix.\n \"\"\"\n return self._cov\n\n def reduced_gaussian(self, modes):\n r\"\"\" Returns the vector of means and the covariance matrix of the specified modes.\n\n Args:\n modes (int of Sequence[int]): indices of the requested modes\n\n Returns:\n tuple (means, cov): where means is an array containing the vector of means,\n and cov is a square array containing the covariance matrix.\n \"\"\"\n if modes == list(range(self._modes)):\n # reduced state is full state\n return self._mu, self._cov\n\n # reduce rho down to specified subsystems\n if isinstance(modes, int):\n modes = [modes]\n\n if modes != sorted(modes):\n raise ValueError(\"The specified modes cannot be duplicated.\")\n\n if len(modes) > self._modes:\n raise ValueError(\"The number of specified modes cannot \"\n \"be larger than the number of subsystems.\")\n\n ind = np.concatenate([np.array(modes), np.array(modes)+self._modes])\n rows = ind.reshape(-1, 1)\n cols = ind.reshape(1, -1)\n\n mu = self._mu[ind]\n cov = self._cov[rows, cols]\n\n return mu, cov\n\n def is_coherent(self, mode, tol=1e-10):\n r\"\"\"Returns True if the Gaussian state of a particular mode is a coherent state.\n\n Args:\n mode (int): the specified mode\n tol (float): the numerical precision in determining if squeezing is not present\n\n Returns:\n bool: True if and only if the state is a coherent state.\n \"\"\"\n mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable\n cov /= self._hbar/2\n return np.allclose(cov, np.identity(2), atol=tol, rtol=0)\n\n def displacement(self, modes=None):\n r\"\"\"Returns the displacement parameter :math:`\\alpha` of the modes specified.\n\n Args:\n modes (int or Sequence[int]): modes specified\n\n Returns:\n Sequence[complex]: sequence of complex displacements :math:`\\alpha`\n corresponding to the list of specified modes\n \"\"\"\n if modes is None:\n modes = list(range(self._modes))\n elif isinstance(modes, int): # pragma: no cover\n modes = [modes]\n\n return self._alpha[list(modes)]\n\n def is_squeezed(self, mode, tol=1e-6):\n r\"\"\"Returns True if the Gaussian state of a particular mode is a squeezed state.\n\n Args:\n mode (int): the specified mode\n tol (float): the numerical precision in determining if squeezing is present\n\n Returns:\n bool: True if and only if the state is a squeezed state.\n \"\"\"\n mu, cov = self.reduced_gaussian([mode]) # pylint: disable=unused-variable\n cov /= 
self._hbar/2\n return np.any(np.abs(cov - np.identity(2)) > tol)\n\n def squeezing(self, modes=None):\n r\"\"\"Returns the squeezing parameters :math:`(r,\\phi)` of the modes specified.\n\n Args:\n modes (int or Sequence[int]): modes specified\n\n Returns:\n List[(float, float)]: sequence of tuples containing the squeezing\n parameters :math:`(r,\\phi)` of the specified modes.\n \"\"\"\n if modes is None:\n modes = list(range(self._modes))\n elif isinstance(modes, int): # pragma: no cover\n modes = [modes]\n\n res = []\n for i in modes:\n mu, cov = self.reduced_gaussian([i]) # pylint: disable=unused-variable\n cov /= self._hbar/2\n tr = np.trace(cov)\n\n r = np.arccosh(tr/2)/2\n\n if cov[0, 1] == 0.:\n phi = 0\n else:\n phi = -np.arcsin(2*cov[0, 1] / np.sqrt((tr-2)*(tr+2)))\n\n res.append((r, phi))\n\n return res\n\n #=====================================================\n # the following methods are overwritten from BaseState\n\n def wigner(self, mode, xvec, pvec):\n mu, cov = self.reduced_gaussian([mode])\n\n X, P = np.meshgrid(xvec, pvec)\n grid = np.empty(X.shape + (2,))\n grid[:, :, 0] = X\n grid[:, :, 1] = P\n mvn = multivariate_normal(mu, cov, allow_singular=True)\n\n return mvn.pdf(grid)\n\n def quad_expectation(self, mode, phi=0, **kwargs):\n # pylint: disable=unused-argument\n mu, cov = self.reduced_gaussian([mode])\n rot = _R(phi)\n\n muphi = rot.T @ mu\n covphi = rot.T @ cov @ rot\n return (muphi[0], covphi[0, 0])\n\n def poly_quad_expectation(self, A, d=None, k=0, phi=0, **kwargs):\n if A is None:\n A = np.zeros([2*self._modes, 2*self._modes])\n\n if A.shape != (2*self._modes, 2*self._modes):\n raise ValueError(\"Matrix of quadratic coefficients A must be of size 2Nx2N.\")\n\n if not np.allclose(A.T, A):\n raise ValueError(\"Matrix of quadratic coefficients A must be symmetric.\")\n\n if d is not None:\n if d.shape != (2*self._modes,):\n raise ValueError(\"Vector of linear coefficients d must be of length 2N.\")\n else:\n d = np.zeros([2*self._modes])\n\n # determine modes with quadratic expectation values\n nonzero = np.concatenate([np.mod(A.nonzero()[0], self._modes), np.mod(d.nonzero()[0], self._modes)])\n ex_modes = list(set(nonzero))\n\n # reduce the size of A so that we only consider modes\n # which we need to calculate the expectation value for\n rows = ex_modes + [i+self._modes for i in ex_modes]\n num_modes = len(ex_modes)\n quad_coeffs = A[:, rows][rows]\n\n if not ex_modes:\n # only a constant term was provided\n return k, 0.\n\n mu = self._mu\n cov = self._cov\n\n if phi != 0:\n # rotate all modes of the covariance matrix and vector of means\n R = _R(phi)\n C = changebasis(self._modes)\n rot = C.T @ block_diag(*([R]*self._modes)) @ C\n\n mu = rot.T @ mu\n cov = rot.T @ cov @ rot\n\n # transform to the expectation of a quadratic on a normal distribution with zero mean\n # E[P(r)]_(mu,cov) = E(Q(r+mu)]_(0,cov)\n # = E[rT.A.r + rT.(2A.mu+d) + (muT.A.mu+muT.d+cI)]_(0,cov)\n # = E[rT.A.r + rT.d' + k']_(0,cov)\n d2 = 2*A @ mu + d\n k2 = mu.T @ A @ mu + mu.T @ d + k\n\n # expectation value E[P(r)]_{mu=0} = tr(A.cov) + muT.A.mu + muT.d + k|_{mu=0}\n # = tr(A.cov) + k\n mean = np.trace(A @ cov) + k2\n # variance Var[P(r)]_{mu=0} = 2tr(A.cov.A.cov) + 4*muT.A.cov.A.mu + dT.cov.d|_{mu=0}\n # = 2tr(A.cov.A.cov) + dT.cov.d\n var = 2*np.trace(A @ cov @ A @ cov) + d2.T @ cov @ d2\n\n # Correction term to account for incorrect symmetric ordering in the variance.\n # This occurs because Var[S(P(r))] = Var[P(r)] - Σ_{m1, m2} |hbar*A_{(m1, m1+N),(m2, m2+N)}|,\n # where m1, m2 are all 
possible mode numbers, and N is the total number of modes.\n # Therefore, the correction term is the sum of the determinants of 2x2 submatrices of A.\n modes = np.arange(2*num_modes).reshape(2, -1).T\n var -= np.sum([np.linalg.det(self._hbar*quad_coeffs[:, m][n]) for m in modes for n in modes])\n\n return mean, var\n\n @abc.abstractmethod\n def reduced_dm(self, modes, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fock_prob(self, n, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def mean_photon(self, mode, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity(self, other_state, mode, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_vacuum(self, **kwargs):\n raise NotImplementedError\n\n @abc.abstractmethod\n def fidelity_coherent(self, alpha_list, **kwargs):\n raise NotImplementedError\n",
"#!/usr/bin/env python3\nimport strawberryfields as sf\nfrom strawberryfields.ops import *\nfrom strawberryfields.utils import scale\nfrom numpy import pi, sqrt\nimport numpy as np\n\n# initialize engine and program objects\neng = sf.Engine(backend=\"gaussian\")\ngaussian_cloning = sf.Program(4)\n\nwith gaussian_cloning.context as q:\n # state to be cloned\n Coherent(0.7+1.2j) | q[0]\n\n # 50-50 beamsplitter\n BS = BSgate(pi/4, 0)\n\n # symmetric Gaussian cloning scheme\n BS | (q[0], q[1])\n BS | (q[1], q[2])\n MeasureX | q[1]\n MeasureP | q[2]\n Xgate(scale(q[1], sqrt(2))) | q[0]\n Zgate(scale(q[2], sqrt(2))) | q[0]\n\n # after the final beamsplitter, modes q[0] and q[3]\n # will contain identical approximate clones of the\n # initial state Coherent(0.1+0j)\n BS | (q[0], q[3])\n # end circuit\n\n# run the engine\nresults = eng.run(gaussian_cloning, run_options={\"modes\": [0, 3]})\n\n# return the cloning fidelity\nfidelity = sqrt(results.state.fidelity_coherent([0.7+1.2j, 0.7+1.2j]))\n# return the cloned displacement\nalpha = results.state.displacement()\n\n# run the engine over an ensemble\nreps = 1000\nf = np.empty([reps])\na = np.empty([reps], dtype=np.complex128)\n\nfor i in range(reps):\n eng.reset()\n results = eng.run(gaussian_cloning, run_options={\"modes\": [0]})\n f[i] = results.state.fidelity_coherent([0.7+1.2j])\n a[i] = results.state.displacement()\n\nprint(\"Fidelity of cloned state:\", np.mean(f))\nprint(\"Mean displacement of cloned state:\", np.mean(a))\nprint(\"Mean covariance matrix of cloned state:\", np.cov([a.real, a.imag]))\n"
] | [
[
"numpy.dot",
"numpy.sqrt",
"numpy.einsum",
"numpy.trace",
"numpy.allclose",
"numpy.reshape",
"numpy.arange",
"numpy.eye",
"numpy.sin",
"numpy.linalg.det",
"numpy.real",
"numpy.tensordot",
"scipy.special.factorial",
"numpy.zeros",
"numpy.arccosh",
"scipy.stats.multivariate_normal",
"numpy.identity",
"numpy.transpose",
"numpy.meshgrid",
"numpy.array",
"numpy.sum",
"numpy.vdot",
"numpy.conj",
"numpy.abs",
"scipy.linalg.block_diag",
"numpy.cos",
"numpy.shape",
"numpy.empty"
],
[
"numpy.cov",
"numpy.mean",
"numpy.sqrt",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"0.12",
"0.10"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
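The strawberryfields row above reads the squeezing parameters (r, phi) of a mode off its reduced 2x2 covariance matrix via r = arccosh(tr/2)/2. A minimal standalone sketch of that same formula, assuming the covariance has already been divided by hbar/2 as in the `squeezing` method:

    import numpy as np

    # Standalone version of the (r, phi) extraction used in `squeezing` above;
    # cov must already be in hbar/2 units (vacuum == identity).
    def squeezing_params(cov):
        tr = np.trace(cov)
        r = np.arccosh(tr / 2) / 2
        if cov[0, 1] == 0.0:
            phi = 0.0
        else:
            phi = -np.arcsin(2 * cov[0, 1] / np.sqrt((tr - 2) * (tr + 2)))
        return r, phi

    # A squeezed vacuum with r=0.5 has cov = diag(exp(-2r), exp(+2r)).
    print(squeezing_params(np.diag([np.exp(-1.0), np.exp(1.0)])))  # ~(0.5, 0.0)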
KyleKing/dash_charts | [
"8e3644505047fa85f3175f5bc55a2421cb0a19ea"
] | [
"tests/examples/ex_rolling_chart.py"
] | [
"\"\"\"Example Rolling Mean and Filled Standard Deviation Chart.\"\"\"\n\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport numpy as np\nimport pandas as pd\nimport plotly.graph_objects as go\nfrom implements import implements\n\nfrom dash_charts.scatter_line_charts import RollingChart\nfrom dash_charts.utils_app import AppBase, AppInterface\nfrom dash_charts.utils_callbacks import map_args, map_outputs\nfrom dash_charts.utils_fig import make_dict_an, min_graph\nfrom dash_charts.utils_helpers import parse_dash_cli_args\n\n\n@implements(AppInterface) # noqa: H601\nclass RollingDemo(AppBase):\n \"\"\"Example creating a rolling mean chart.\"\"\"\n\n name = 'Example Rolling Chart'\n \"\"\"Application name\"\"\"\n\n data_raw = None\n \"\"\"All in-memory data referenced by callbacks and plotted. If modified, will impact all viewers.\"\"\"\n\n chart_main = None\n \"\"\"Main chart (Rolling).\"\"\"\n\n id_slider = 'slider'\n \"\"\"Slider ID.\"\"\"\n\n id_chart = 'rolling'\n \"\"\"Unique name for the main chart.\"\"\"\n\n def initialization(self) -> None:\n \"\"\"Initialize ids with `self.register_uniq_ids([...])` and other one-time actions.\"\"\"\n super().initialization()\n self.register_uniq_ids([self.id_slider, self.id_chart])\n\n def generate_data(self) -> None:\n \"\"\"Create self.data_raw with sample data.\"\"\"\n # Generate random data points\n count = 1000\n mu, sigma = (15, 10) # mean and standard deviation\n samples = np.random.normal(mu, sigma, count)\n # Add a break at the mid-point\n mid_count = count / 2\n y_vals = [samples[_i] + (-1 if _i > mid_count else 1) * _i / 10.0 for _i in range(count)]\n\n # Combine into a dataframe\n self.data_raw = pd.DataFrame(\n data={\n 'x': range(count),\n 'y': y_vals,\n 'label': [f'Point {idx}' for idx in range(count)],\n },\n )\n\n def create_elements(self) -> None:\n \"\"\"Initialize the charts, tables, and other Dash elements.\"\"\"\n self.chart_main = RollingChart(\n title='Sample Timeseries Chart with Rolling Calculations',\n xlabel='Index',\n ylabel='Measured Value',\n )\n # Add some example annotations\n colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728', '#9467bd', '#e377c2', '#7f7f7f', '#17becf', None]\n count = 1000\n y_offset = np.mean(self.data_raw['y']) - np.amin(self.data_raw['y'])\n for idx, color in enumerate(colors):\n label = f'Additional Information for index {idx + 1} and color {color}'\n coord = [self.data_raw[ax][20 + int(idx * count / len(colors))] for ax in ['x', 'y']]\n self.chart_main.annotations.append(\n go.layout.Annotation(\n **make_dict_an(coord, str(idx + 1), label, color, y_offset),\n ),\n )\n\n def return_layout(self) -> dict:\n \"\"\"Return Dash application layout.\n\n Returns:\n dict: Dash HTML object\n\n \"\"\"\n step = 50\n slider_max = 1000\n return html.Div(\n style={\n 'maxWidth': '1000px',\n 'marginRight': 'auto',\n 'marginLeft': 'auto',\n }, children=[\n html.H4(children=self.name),\n min_graph(id=self._il[self.id_chart], figure=self.chart_main.create_figure(self.data_raw)),\n dcc.RangeSlider(\n id=self._il[self.id_slider], min=0, max=slider_max, step=step / 5, value=[150, 825],\n marks={str(idx * step): str(idx * step) for idx in range(int(slider_max / step))},\n ),\n ],\n )\n\n def create_callbacks(self) -> None:\n \"\"\"Create Dash callbacks.\"\"\"\n outputs = [(self.id_chart, 'figure')]\n inputs = [(self.id_slider, 'value')]\n states = []\n\n @self.callback(outputs, inputs, states, pic=True)\n def update_chart(*raw_args):\n a_in, a_states = map_args(raw_args, inputs, 
states)\n slider = a_in[self.id_slider]['value']\n df_filtered = self.data_raw[(self.data_raw['x'] >= slider[0]) & (self.data_raw['x'] <= slider[1])]\n self.chart_main.axis_range = {'x': slider}\n new_figure = self.chart_main.create_figure(df_raw=df_filtered)\n\n # See: https://plot.ly/python/range-slider/\n new_figure['layout']['xaxis']['rangeslider'] = {'visible': True}\n return map_outputs(outputs, [(self.id_chart, 'figure', new_figure)])\n\n\ninstance = RollingDemo\napp = instance()\napp.create()\nif __name__ == '__main__':\n app.run(**parse_dash_cli_args())\nelse:\n FLASK_HANDLE = app.get_server()\n"
] | [
[
"numpy.amin",
"numpy.random.normal",
"numpy.mean"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
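RollingChart's internals are not part of this row; below is a plain-pandas sketch of the rolling mean and +/- 2 sigma band such a chart typically overlays on the demo's generated series. The window size of 50 is an assumption, not taken from the library.

    import numpy as np
    import pandas as pd

    # Recreate the demo's noisy series with a break at the midpoint, then
    # compute the rolling statistics a chart like RollingChart would display.
    count = 1000
    samples = np.random.normal(15, 10, count)  # mu=15, sigma=10 as in the demo
    y_vals = [samples[i] + (-1 if i > count / 2 else 1) * i / 10.0 for i in range(count)]
    df = pd.DataFrame({'x': range(count), 'y': y_vals})

    window = 50  # assumed window size
    df['rolling_mean'] = df['y'].rolling(window, min_periods=1).mean()
    df['rolling_std'] = df['y'].rolling(window, min_periods=1).std()
    df['upper'] = df['rolling_mean'] + 2 * df['rolling_std']  # +/- 2 sigma band
    df['lower'] = df['rolling_mean'] - 2 * df['rolling_std']
    print(df[['y', 'rolling_mean', 'upper', 'lower']].tail())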
hluedemann/inplace_abn | [
"5210ec0b38ba7c6c9deb08927aa18806ac3380f3",
"5210ec0b38ba7c6c9deb08927aa18806ac3380f3"
] | [
"scripts/models/densenet.py",
"inplace_abn/functions.py"
] | [
"import sys\nfrom collections import OrderedDict\nfrom functools import partial\n\nimport torch.nn as nn\n\nfrom inplace_abn import ABN\nfrom modules import GlobalAvgPool2d, DenseModule\nfrom .util import try_index\n\n\nclass DenseNet(nn.Module):\n def __init__(self,\n structure,\n norm_act=ABN,\n input_3x3=False,\n growth=32,\n theta=0.5,\n classes=0,\n dilation=1):\n \"\"\"DenseNet\n\n Parameters\n ----------\n structure : list of int\n Number of layers in each of the four dense blocks of the network.\n norm_act : callable\n Function to create normalization / activation Module.\n input_3x3 : bool\n If `True` use three `3x3` convolutions in the input module instead of a single `7x7` one.\n growth : int\n Number of channels in each layer, i.e. the \"growth\" factor of the DenseNet.\n theta : float\n Reduction factor for the transition blocks.\n classes : int\n If not `0` also include global average pooling and a fully-connected layer with `classes` outputs at the end\n of the network.\n dilation : int or list of int\n List of dilation factors, or `1` to ignore dilation. If the dilation factor for a module is greater than `1`\n skip the pooling in the transition block right before it.\n \"\"\"\n super(DenseNet, self).__init__()\n self.structure = structure\n if len(structure) != 4:\n raise ValueError(\"Expected a structure with four values\")\n\n # Initial layers\n if input_3x3:\n layers = [\n (\"conv1\", nn.Conv2d(3, growth * 2, 3, stride=2, padding=1, bias=False)),\n (\"bn1\", norm_act(growth * 2)),\n (\"conv2\", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),\n (\"bn2\", norm_act(growth * 2)),\n (\"conv3\", nn.Conv2d(growth * 2, growth * 2, 3, stride=1, padding=1, bias=False)),\n (\"pool\", nn.MaxPool2d(3, stride=2, padding=1))\n ]\n else:\n layers = [\n (\"conv1\", nn.Conv2d(3, growth * 2, 7, stride=2, padding=3, bias=False)),\n (\"pool\", nn.MaxPool2d(3, stride=2, padding=1))\n ]\n self.mod1 = nn.Sequential(OrderedDict(layers))\n\n in_channels = growth * 2\n for mod_id in range(4):\n d = try_index(dilation, mod_id)\n s = 2 if d == 1 and mod_id > 0 else 1\n\n # Create transition module\n if mod_id > 0:\n out_channels = int(in_channels * theta)\n layers = [\n (\"bn\", norm_act(in_channels)),\n (\"conv\", nn.Conv2d(in_channels, out_channels, 1, bias=False))\n ]\n if s == 2:\n layers.append((\"pool\", nn.AvgPool2d(2, 2)))\n self.add_module(\"tra%d\" % (mod_id + 1), nn.Sequential(OrderedDict(layers)))\n in_channels = out_channels\n\n # Create dense module\n mod = DenseModule(in_channels, growth, structure[mod_id], norm_act=norm_act, dilation=d)\n self.add_module(\"mod%d\" % (mod_id + 2), mod)\n in_channels = mod.out_channels\n\n # Pooling and predictor\n self.bn_out = norm_act(in_channels)\n if classes != 0:\n self.classifier = nn.Sequential(OrderedDict([\n (\"avg_pool\", GlobalAvgPool2d()),\n (\"fc\", nn.Linear(in_channels, classes))\n ]))\n\n def forward(self, x):\n x = self.mod1(x)\n x = self.mod2(x)\n x = self.tra2(x)\n x = self.mod3(x)\n x = self.tra3(x)\n x = self.mod4(x)\n x = self.tra4(x)\n x = self.mod5(x)\n x = self.bn_out(x)\n\n if hasattr(self, \"classifier\"):\n x = self.classifier(x)\n return x\n\n\n_NETS = {\n \"121\": {\"structure\": [6, 12, 24, 16]},\n \"169\": {\"structure\": [6, 12, 32, 32]},\n \"201\": {\"structure\": [6, 12, 48, 32]},\n \"264\": {\"structure\": [6, 12, 64, 48]},\n}\n\n__all__ = []\nfor name, params in _NETS.items():\n net_name = \"net_densenet\" + name\n setattr(sys.modules[__name__], net_name, partial(DenseNet, **params))\n 
__all__.append(net_name)\n",
"from typing import Optional\n\nimport torch\nimport torch.autograd as autograd\nimport torch.distributed as distributed\nfrom torch.autograd.function import once_differentiable\n\nfrom . import _backend\n\n\ndef _activation_from_name(activation):\n if activation == \"leaky_relu\":\n return _backend.Activation.LeakyReLU\n elif activation == \"elu\":\n return _backend.Activation.ELU\n elif activation == \"identity\":\n return _backend.Activation.Identity\n else:\n raise ValueError(\"Unknown activation function {}\".format(activation))\n\n\ndef _count_samples(x):\n count = x.size(0)\n for i in range(2, x.ndimension()):\n count *= x.size(i)\n return count\n\n\nclass InPlaceABN(autograd.Function):\n @staticmethod\n def _gather_values(*tensors, group, world_size):\n # Start gather operations asynchronously\n gathered, gather_ops = [], []\n for t in tensors:\n t_all = t.new_empty(world_size, *t.shape)\n t_op = distributed.all_gather(\n list(t_all.unbind(0)), t, group=group, async_op=True\n )\n\n gathered.append(t_all)\n gather_ops.append(t_op)\n\n # Wait\n for op in gather_ops:\n op.wait()\n\n # Return results\n return tuple(gathered)\n\n @staticmethod\n def _reduce_forward(mean, var, count, group, world_size):\n all_mean, all_var, all_count = InPlaceABN._gather_values(\n mean, var, count, group=group, world_size=world_size\n )\n return _backend.reduce_statistics(all_mean, all_var, all_count)\n\n @staticmethod\n def _reduce_backward(sum_dy, sum_xhat_dy, group, world_size):\n all_sum_dy, all_sum_xhat_dy = InPlaceABN._gather_values(\n sum_dy, sum_xhat_dy, group=group, world_size=world_size\n )\n return all_sum_dy.sum(dim=0), all_sum_xhat_dy.sum(dim=0)\n\n @staticmethod\n def forward(\n ctx,\n x,\n weight,\n bias,\n running_mean,\n running_var,\n training=True,\n momentum=0.1,\n eps=1e-05,\n activation=\"leaky_relu\",\n activation_param=0.01,\n group=None,\n ):\n # Save context\n ctx.training = training\n ctx.momentum = momentum\n ctx.eps = eps\n ctx.activation = _activation_from_name(activation)\n ctx.activation_param = activation_param\n ctx.group = group\n ctx.has_running_stats = running_mean is not None and running_mean is not None\n\n # Check if we really need to perform distributed operations\n if ctx.group is not None:\n ctx.distributed = True\n ctx.world_size = distributed.get_world_size(group=group)\n else:\n ctx.distributed = False\n ctx.world_size = 1\n\n if ctx.training:\n mean, var, count = _backend.statistics(x)\n\n # Gather stats from all workers if needed\n if ctx.distributed:\n mean, var, count = InPlaceABN._reduce_forward(\n mean, var, count, ctx.group, ctx.world_size\n )\n\n # Update running stats if needed\n if ctx.has_running_stats:\n count_ = count.to(dtype=var.dtype)\n running_mean.mul_((1 - ctx.momentum)).add_(ctx.momentum * mean)\n running_var.mul_((1 - ctx.momentum)).add_(\n ctx.momentum * var * count_ / (count_ - 1)\n )\n else:\n mean, var, count = running_mean, running_var, None\n\n # Transform x\n _backend.forward(\n x, mean, var, weight, bias, ctx.eps, ctx.activation, ctx.activation_param\n )\n\n # Save for backward and mark dirty tensors\n ctx.save_for_backward(x, var, count, weight, bias)\n ctx.mark_dirty(x)\n return x\n\n @staticmethod\n @once_differentiable\n def backward(ctx, dy_act):\n y_act, var, count, weight, bias = ctx.saved_tensors\n\n # Call backward_reduce if we need to compute at least one of the gradients\n if any(ctx.needs_input_grad):\n xhat, dy, sum_dy_local, sum_xhat_dy_local = _backend.backward_reduce(\n y_act,\n dy_act,\n weight,\n bias,\n 
ctx.eps,\n ctx.activation,\n ctx.activation_param,\n )\n\n if ctx.distributed:\n sum_dy, sum_xhat_dy = InPlaceABN._reduce_backward(\n sum_dy_local, sum_xhat_dy_local, ctx.group, ctx.world_size\n )\n else:\n sum_dy, sum_xhat_dy = sum_dy_local, sum_xhat_dy_local\n else:\n return None, None, None, None, None, None, None, None, None, None\n\n # Gradient w.r.t. x\n if ctx.needs_input_grad[0]:\n if ctx.training:\n # This overwrites dy with dx\n _backend.backward_train(\n xhat, dy, var, count, sum_dy, sum_xhat_dy, weight, ctx.eps\n )\n dx = dy\n else:\n dx = _backend.backward_test(dy, var, weight, ctx.eps)\n else:\n dx = None\n\n # Gradient w.r.t. weight\n if weight is not None and ctx.needs_input_grad[1]:\n dweight = sum_xhat_dy_local\n dweight[weight < 0] *= -1\n else:\n dweight = None\n\n # Gradient w.r.t. bias\n if bias is not None and ctx.needs_input_grad[2]:\n dbias = sum_dy_local\n else:\n dbias = None\n\n return dx, dweight, dbias, None, None, None, None, None, None, None, None\n\n\ndef inplace_abn(\n x: torch.Tensor,\n weight: Optional[torch.Tensor],\n bias: Optional[torch.Tensor],\n running_mean: Optional[torch.Tensor],\n running_var: Optional[torch.Tensor],\n training: bool = True,\n momentum: float = 0.1,\n eps: float = 1e-05,\n activation: str = \"leaky_relu\",\n activation_param: float = 0.01,\n):\n \"\"\"InPlace Activated Batch Normalization\n\n This applies the following per-channel combined BatchNorm + activation operation:\n\n x_hat = (x - mu) / sqrt(sigma^2 + eps)\n x <- act(x_hat, p) * (|weight| + eps) + bias\n\n where:\n - mu is the per-channel batch mean, or `running_mean` if `training` is `False`\n - sigma^2 is the per-channel batch variance, or `running_var` if `training` is `False`\n - act(., p) is the activation function specified by `activation`\n - p is `activation_param`, i.e. the negative slope of Leaky ReLU or alpha\n parameter of ELU\n - `weight` and `bias` are the optional affine parameters\n - `eps` is a small positive number\n\n The running statistics, if given and if `training` is `True` are updated as follows:\n\n running_mean <- running_mean * momentum + (1 - momentum) * mu\n running_var <- running_var * momentum + (1 - momentum) * unbiased_sigma^2\n\n where unbiased_sigma^2 is the unbiased batch variance\n\n Args:\n x: Input tensor with shape N x C or N x C x S_1 x ... 
x S_n, which will be\n overwritten with the result\n weight: Tensor of affine scale parameters with shape C, or `None`\n bias: Tensor of affine bias parameters with shape C, or `None`\n running_mean: Running mean tensor with shape C, or `None`\n running_var: Running variance tensor with shape C, or `None`\n training: If `True` compute, use and update batch statistics, otherwise use\n running statistics\n momentum: Momentum factor applied to compute running statistics\n eps: Small constant to prevent numerical issues\n activation: Name of the activation function, one of: `leaky_relu`, `elu` or `identity`\n activation_param: Negative slope for the `leaky_relu` activation or `alpha`\n parameter for the `elu` activation\n \"\"\"\n if training:\n samples = _count_samples(x)\n if samples <= 1:\n raise ValueError(\n \"inplace_abn is trying to compute batch statistics, but the input \"\n \"tensor only contains a single sample per channel\"\n )\n\n return InPlaceABN.apply(\n x,\n weight,\n bias,\n running_mean,\n running_var,\n training,\n momentum,\n eps,\n activation,\n activation_param,\n None,\n )\n\n\ndef inplace_abn_sync(\n x: torch.Tensor,\n weight: Optional[torch.Tensor],\n bias: Optional[torch.Tensor],\n running_mean: Optional[torch.Tensor],\n running_var: Optional[torch.Tensor],\n training: bool = True,\n momentum: float = 0.1,\n eps: float = 1e-05,\n activation: str = \"leaky_relu\",\n activation_param: float = 0.01,\n group=distributed.group.WORLD,\n):\n \"\"\"InPlace Activated Batch Normalization with distributed synchronization\n\n This operates like `inplace_abn`, but assumes to be called by all replicas\n in the given distributed group, and computes batch statistics across all of them.\n Note that the input tensors can have different dimensions in each replica.\n\n Args:\n x: Input tensor with shape N x C or N x C x S_1 x ... x S_n, which will be\n overwritten with the result\n weight: Tensor of affine scale parameters with shape C, or `None`\n bias: Tensor of affine bias parameters with shape C, or `None`\n running_mean: Running mean tensor with shape C, or `None`\n running_var: Running variance tensor with shape C, or `None`\n training: If `True` compute, use and update batch statistics, otherwise use\n running statistics\n momentum: Momentum factor applied to compute running statistics\n eps: Small constant to prevent numerical issues\n activation: Name of the activation function, one of: `leaky_relu`, `elu` or `identity`\n activation_param: Negative slope for the `leaky_relu` activation or `alpha`\n parameter for the `elu` activation\n group: Distributed group to synchronize with, default is WORLD\n \"\"\"\n if training:\n samples = _count_samples(x)\n if samples <= 1:\n raise ValueError(\n \"inplace_abn_sync is trying to compute batch statistics, but the input \"\n \"tensor only contains a single sample per channel\"\n )\n\n return InPlaceABN.apply(\n x,\n weight,\n bias,\n running_mean,\n running_var,\n training,\n momentum,\n eps,\n activation,\n activation_param,\n group,\n )\n\n\n__all__ = [\"inplace_abn\", \"inplace_abn_sync\"]\n"
] | [
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AvgPool2d",
"torch.nn.Conv2d"
],
[
"torch.distributed.get_world_size"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
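The `inplace_abn` docstring above spells out the fused per-channel transform. As a cross-check, here is a non-in-place reference sketch of the same arithmetic in plain PyTorch, assuming training-mode batch statistics and the default leaky_relu activation; it is a readability aid, not the library's fused CUDA path:

    import torch
    import torch.nn.functional as F

    # Non-in-place reference for the transform documented above:
    #   x_hat = (x - mu) / sqrt(sigma^2 + eps)
    #   y     = act(x_hat, p) * (|weight| + eps) + bias
    def abn_reference(x, weight, bias, eps=1e-5, slope=0.01):
        dims = (0,) + tuple(range(2, x.dim()))  # reduce over batch and spatial dims
        mean = x.mean(dim=dims, keepdim=True)
        var = x.var(dim=dims, unbiased=False, keepdim=True)
        xhat = (x - mean) / torch.sqrt(var + eps)
        shape = [1, -1] + [1] * (x.dim() - 2)   # broadcast per-channel params
        act = F.leaky_relu(xhat, negative_slope=slope)
        return act * (weight.abs().view(shape) + eps) + bias.view(shape)

    out = abn_reference(torch.randn(4, 3, 8, 8), torch.ones(3), torch.zeros(3))
    print(out.shape)  # torch.Size([4, 3, 8, 8])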
lipinoelbreve/topicos-streamlit | [
"849d34ee4c8f1dbd700c50a2069e87dbb6c93663",
"849d34ee4c8f1dbd700c50a2069e87dbb6c93663"
] | [
"data-collection/main.py",
"data-collection/generar_muestras.py"
] | [
"# %%\n# Recorre las 1000 paginas de articulos que podemos ver\n# De cada artículo guarda:\n# Url\n# Id de Pubmed\n# Título\n# Keywords\n# Lista de autores con nombres y afiliaciones y país\n\n# El código no para hasta que lo frenes o que llegue a la página 1.000, pero cada vez que carga un artículo lo guarda, así que se puede\n# frenar en cualquier momento\n\nfrom bs4 import BeautifulSoup\nimport requests\nimport re\nimport numpy as np\nfrom tqdm import tqdm\nfrom time import sleep\nimport os\nfrom Article import ArticleCollection\n\n#%%\nbase_url = 'https://pubmed.ncbi.nlm.nih.gov'\nfilter_url = '/?term=(((%222016%22%5BDate%20-%20Publication%5D%20%3A%20%223000%22%5BDate%20-%20Publication%5D))%20AND%20(%22english%22%5BLanguage%5D))%20AND%20(%22journal%20article%22%5BPublication%20Type%5D)'\n\nyears_to_process = [2016,2017,2018,2019,2020,2021]\npages_per_year = 500\npause_duration = 1 # segundos entre requests\n\n#%%\n# Cargo lo que se avanzo hasta ahora\ncurrent = os.getcwd()\nfilename = current + '/articles.pkl'\narticle_collection = ArticleCollection()\narticle_collection.load_years(years_to_process)\n\nremaining_pages = np.arange(1,pages_per_year+1)\nif os.path.exists(filename):\n article_collection.load('articles.pkl')\n processed_pages = article_collection.processed_pages[ article_collection.current_year ]\n \n if len(processed_pages) > 0:\n remaining_pages = np.arange(processed_pages[-1], pages_per_year+1)\n\n years_to_process = np.arange(article_collection.current_year, 2022)\n\n#%%\nprint('Descargando articulos...')\nprint('Ctrl + C para frenar (todo el proceso es guardado)')\n\nfor year in years_to_process:\n print('Processing year', year)\n article_collection.current_year = year\n for page in tqdm( remaining_pages ):\n url = base_url + filter_url + '&filter=years.' + str(year) + '-' + str(year) + '&page=' + str(page)\n r = requests.get(url)\n souped = BeautifulSoup(r.content.decode(\"utf-8\"), features=\"html.parser\")\n\n articles_in_page = souped.find_all('a', attrs={'class': 'docsum-title'})\n articles_ids = [ int(re.sub('[^\\d]', '', article['href'])) for article in articles_in_page ]\n \n for article_id in articles_ids:\n if article_id not in article_collection.articles.keys():\n article_link = base_url + '/' + str(article_id)\n res = article_collection.get_article_data( article_link )\n article_collection.save('articles.pkl')\n print(res, article_id)\n sleep(pause_duration)\n\n if page not in article_collection.processed_pages:\n article_collection.processed_pages[article_collection.current_year].append(page)\n print('Processed page', page, '-', article_collection.current_year)\n \n remaining_pages = np.arange(1, pages_per_year+1)",
"from Article import *\nimport pandas as pd\nimport numpy as np\nfrom tqdm import tqdm\n\narticles = ArticleCollection()\narticles.load('articles.pkl')\n\nkeys = list(articles.articles.keys())\ntipos = ['cardiaca', 'respiratoria', 'genetica']\nenfermedades = pd.DataFrame(columns=['Enfermedad','Grupo'])\nautores = pd.DataFrame(columns=['Institucion','Pais'])\ninvestigaciones = pd.DataFrame(columns = ['Institucion','Enfermedad','Year'])\n\nfor _ in tqdm(range(500)):\n article = np.random.choice(list(articles.articles.values()))\n keywords = article.keywords\n for keyword in keywords:\n if keyword not in enfermedades.Enfermedad:\n enfermedades = enfermedades.append(\n pd.DataFrame({'Enfermedad':[keyword],\n 'Grupo': [np.random.choice(tipos)]}),\n ignore_index=True)\n\n authors = article.authors\n for author in authors:\n if author.affiliation_short_name not in autores.Institucion:\n if author.affiliation_short_name != str:\n if author.affiliation_short_name != None:\n autores = autores.append(\n pd.DataFrame({\n 'Institucion': [author.affiliation_short_name],\n 'Pais': [author.affiliation_country]\n }),\n ignore_index = True\n )\n \n for author in authors:\n if author.affiliation_short_name != str:\n if author.affiliation_short_name != None:\n for keyword in keywords:\n investigaciones = investigaciones.append(\n pd.DataFrame({\n 'Institucion': [author.affiliation_short_name],\n 'Enfermedad': [keyword],\n 'Year': [article.year]\n }),\n ignore_index = True\n )\n\nenfermedades.to_csv('enfermedades.csv')\nautores.to_csv('autores.csv')\ninvestigaciones.to_csv('investigaciones.csv')"
] | [
[
"numpy.arange"
],
[
"pandas.DataFrame",
"numpy.random.choice"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
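generar_muestras.py above grows its DataFrames row by row with DataFrame.append, which pandas deprecated in 1.4 and removed in 2.0. A short sketch of the usual replacement (accumulate plain dicts, build the frame once); the keyword values here are made up for illustration:

    import numpy as np
    import pandas as pd

    # Accumulate dicts and construct the DataFrame once at the end -- the
    # idiomatic substitute for repeated DataFrame.append calls.
    tipos = ['cardiaca', 'respiratoria', 'genetica']
    rows = []
    for keyword in ['asma', 'arritmia', 'fibrosis']:  # hypothetical keywords
        if keyword not in {r['Enfermedad'] for r in rows}:  # de-duplicate as we go
            rows.append({'Enfermedad': keyword, 'Grupo': np.random.choice(tipos)})
    enfermedades = pd.DataFrame(rows, columns=['Enfermedad', 'Grupo'])
    print(enfermedades)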
VisCog/ArgusShapes | [
"ba361e28b8d30097c41314bbfe68341cc8ac0c01"
] | [
"argus_shapes/tests/test_argus_shapes.py"
] | [
"from __future__ import absolute_import, division, print_function\nimport os\nimport numpy as np\nimport pandas as pd\nimport shutil\nimport requests\n\nimport numpy.testing as npt\nimport pytest\n\nimport skimage.io as skio\n\nfrom .. import argus_shapes as shapes\nimport pulse2percept.implants as p2pi\n\ntry:\n FileNotFoundError\nexcept NameError:\n # Python 2\n FileNotFoundError = IOError\n\n\ndef generate_dummy_data():\n X = pd.DataFrame()\n X['subject'] = pd.Series(['S1', 'S1', 'S2', 'S2', 'S3', 'S3'])\n X['feature1'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\n X['feature2'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])\n y = pd.DataFrame()\n y['subject'] = pd.Series(['S1', 'S1', 'S2', 'S2', 'S3', 'S3'],\n index=X.index)\n y['target'] = pd.Series([0.1, 0.2, 0.3, 0.4, 0.5, 0.6],\n index=X.index)\n y['image'] = pd.Series([np.random.rand(10, 10)] * 6)\n y['area'] = pd.Series([1, 2, 3, 4, 5, 6])\n return X, y\n\n\ndef test_download_file():\n fname = \"test.zip\"\n with pytest.raises(requests.exceptions.HTTPError):\n shapes.download_file(\"https://github.com/VisCog/blah\", fname)\n shapes.download_file(\"https://osf.io/rduj4\", fname)\n os.remove(fname)\n\n\ndef test_fetch_data():\n test_dir = \"test\"\n with pytest.raises(ValueError):\n shapes.fetch_data()\n shapes.fetch_data(save_path=test_dir)\n npt.assert_equal(\n os.path.exists(os.path.join(test_dir, 'argus_shapes.zip')),\n True\n )\n npt.assert_equal(os.path.isdir(os.path.join(test_dir, 'argus_shapes')),\n True)\n npt.assert_equal(\n os.path.exists(os.path.join(test_dir, 'argus_shapes',\n 'drawings_single.csv')),\n True\n )\n npt.assert_equal(\n os.path.exists(os.path.join(test_dir, 'argus_shapes', 'subjects.csv')),\n True\n )\n shutil.rmtree(test_dir)\n\n\ndef test_load_data():\n with pytest.raises(FileNotFoundError):\n shapes.load_data(\"doesforsurenotexist.csv\", auto_fetch=False)\n\n csvfile = \"data.csv\"\n csvfile2 = \"data2.csv\"\n imgfile = \"test_image.png\"\n skio.imsave(imgfile, np.random.randint(256, size=(10, 10)))\n\n subjects = ['S1', 'S2']\n electrodes = ['A1', 'F9']\n amps = [2.0, 3.0]\n for use_fullpath in [True, False]:\n data = []\n for subject in subjects:\n for electrode in electrodes:\n for amp in amps:\n if use_fullpath:\n fname = os.path.join(os.getcwd(), imgfile)\n else:\n fname = imgfile\n row = {\n 'subject_id': subject,\n 'PTS_ELECTRODE': electrode,\n 'PTS_FILE': fname,\n 'PTS_AMP': amp,\n 'PTS_FREQ': 20.0,\n 'PTS_PULSE_DUR': 0.45,\n 'stim_class': 'SingleElectrode',\n 'date': '1985/09/30'\n }\n data.append(row)\n pd.DataFrame(data).to_csv(csvfile, index=False)\n X = shapes.load_data(csvfile)\n npt.assert_equal(np.sort(X.subject.unique()), subjects)\n npt.assert_equal(np.sort(X.electrode.unique()), electrodes)\n npt.assert_equal(len(X), len(subjects) * len(electrodes) * len(amps))\n\n with pytest.raises(ValueError):\n XX = X.copy()\n XX['PTS_ELECTRODE1'] = XX['electrode']\n XX['PTS_ELECTRODE2'] = XX['electrode']\n XX.drop(columns='electrode', inplace=True)\n XX.to_csv(csvfile2, index=False)\n X = shapes.load_data(csvfile2)\n\n for subject in subjects + ['nobody', 'S10']:\n X = shapes.load_data(csvfile, subject=subject)\n if subject in subjects:\n npt.assert_equal(np.sort(X.subject.unique()), subject)\n npt.assert_equal(np.sort(X.electrode.unique()), electrodes)\n npt.assert_equal(np.sort(X.amp.unique()), amps)\n else:\n npt.assert_equal(len(X), 0)\n npt.assert_equal(len(X.columns), 0)\n\n for electrode in electrodes + ['F10']:\n X = shapes.load_data(csvfile, electrodes=[electrode])\n if electrode in 
electrodes:\n npt.assert_equal(np.sort(X.subject.unique()), subjects)\n npt.assert_equal(np.sort(X.electrode.unique()), electrode)\n npt.assert_equal(np.sort(X.amp.unique()), amps)\n else:\n npt.assert_equal(len(X), 0)\n npt.assert_equal(len(X.columns), 0)\n\n for amp in amps + [1.5]:\n X = shapes.load_data(csvfile, amp=amp)\n if np.any([np.isclose(a, amp) for a in amps]):\n npt.assert_equal(np.sort(X.subject.unique()), subjects)\n npt.assert_equal(np.sort(X.electrode.unique()), electrodes)\n npt.assert_equal(np.sort(X.amp.unique()), amp)\n else:\n npt.assert_equal(len(X), 0)\n npt.assert_equal(len(X.columns), 0)\n\n with pytest.raises(ValueError):\n shapes.load_data(csvfile, electrodes='A1')\n\n os.remove(csvfile)\n os.remove(csvfile2)\n os.remove(imgfile)\n\n\ndef test_load_subjects():\n with pytest.raises(FileNotFoundError):\n shapes.load_subjects(\"forsuredoesntexist.csv\", auto_fetch=False)\n\n csvfile = \"data.csv\"\n data = [\n {'subject_id': 'S1', 'implant_type_str': 'ArgusI',\n 'implant_x': 10, 'implant_y': 20, 'implant_rot': 0.5,\n 'xmin': -30, 'xmax': 30, 'ymin': -20, 'ymax': 20,\n 'loc_od_x': 15, 'loc_od_y': 2},\n {'subject_id': 'S2', 'implant_type_str': 'ArgusII',\n 'implant_x': 20, 'implant_y': 40, 'implant_rot': 1.0,\n 'xmin': -60, 'xmax': 60, 'ymin': -30, 'ymax': 30,\n 'loc_od_x': 19, 'loc_od_y': 4},\n ]\n pd.DataFrame(data).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n npt.assert_equal(np.sort(X.index.unique()), ['S1', 'S2'])\n print(X.columns)\n npt.assert_equal(X.loc['S1', 'implant_type'], p2pi.ArgusI)\n npt.assert_equal(X.loc['S2', 'implant_type'], p2pi.ArgusII)\n # etc.\n\n with pytest.raises(ValueError):\n # Missing 'subject_id' index:\n pd.DataFrame([{'subject': 'S1'}]).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n\n with pytest.raises(ValueError):\n # Other missing columns:\n pd.DataFrame([{'subject_id': 'S1'}]).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n with pytest.raises(ValueError):\n # Wrong implant type:\n data[0]['implant_type_str'] = 'ArgusIII'\n pd.DataFrame(data).to_csv(csvfile, index=False)\n X = shapes.load_subjects(csvfile)\n os.remove(csvfile)\n\n\ndef test_is_singlestim_dataframe():\n with pytest.raises(ValueError):\n shapes.is_singlestim_dataframe(pd.DataFrame())\n\n df = pd.DataFrame([\n {'PTS_ELECTRODE': 'A01'},\n {'PTS_ELECTRODE': 'A02'}\n ])\n npt.assert_equal(shapes.is_singlestim_dataframe(df), True)\n\n df = pd.DataFrame([\n {'PTS_ELECTRODE1': 'A01', 'PTS_ELECTRODE2': 'A03'},\n {'PTS_ELECTRODE1': 'A02', 'PTS_ELECTRODE2': 'A04'}\n ])\n npt.assert_equal(shapes.is_singlestim_dataframe(df), False)\n\n\ndef test_calc_mean_images():\n with pytest.raises(ValueError):\n # empty list not allowed\n shapes.calc_mean_images(pd.DataFrame([]), groupby=[])\n with pytest.raises(ValueError):\n # groupby columns not present:\n shapes.calc_mean_images(pd.DataFrame([]))\n with pytest.raises(ValueError):\n # 'image' not in columns:\n shapes.calc_mean_images(pd.DataFrame([{'subject': 'S1'}]),\n groupby=['subject'])\n\n X, y = generate_dummy_data()\n Xy = pd.concat((X, y.drop(columns='subject')), axis=1)\n shapes.calc_mean_images(Xy, groupby=['subject'])\n"
] | [
[
"numpy.testing.assert_equal",
"pandas.Series",
"numpy.isclose",
"pandas.DataFrame",
"numpy.random.rand",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
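The test above exercises shapes.calc_mean_images on the dummy data, but the implementation is not part of this row. A plain-pandas sketch of the grouped image averaging that data implies; this is an assumption about the function's behavior, not the library's code:

    import numpy as np
    import pandas as pd

    # Average the per-row image arrays within each subject group.
    df = pd.DataFrame({
        'subject': ['S1', 'S1', 'S2'],
        'image': [np.random.rand(10, 10) for _ in range(3)],
    })
    mean_images = df.groupby('subject')['image'].apply(
        lambda imgs: np.mean(np.stack(list(imgs)), axis=0)
    )
    print({subject: img.shape for subject, img in mean_images.items()})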
klimpie94/Python-training | [
"7af210126cfe2e9386a8f22075ea0d7eff80daac"
] | [
"Day2/pandas-exercises-python/python-exercises-02-questions/utils/transformation_functions.py"
] | [
"\nimport pandas as pd\n\n\ndef read_csv_files(file_path):\n return pd.read_csv(file_path)\n\n\ndef filter_films(dataframe):\n pass\n\n\ndef join_categories_with_metadata(facts_df, categories_df):\n # Hint: You can use lambda functions to change the id column in order to\n # use join method in pandas.\n pass\n\n\ndef categories_into_one_single_column(categories_df):\n # Hint: When you use dataframe.idxmax(axis=1) you automatically\n # create a pd.Series with categorical values as strings.\n pass\n\n\ndef take_year_from_movie_title_string(movie_title_str):\n try:\n pass\n except (IndexError, ValueError):\n return 9999\n\n\ndef genre_count_table_for_movies_with_aggregation(categories_df):\n pass\n\n\ndef calculate_ratio_of_nomination_over_win(dataframe):\n # Hint 1: Use an additional function for handling\n # zero division error.\n # Hint 2: Nominations + Wins = Total Number of Nominations\n\n pass\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
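The exercise stubs above are intentionally left as `pass`; the hint for categories_into_one_single_column points at DataFrame.idxmax(axis=1). A sketch of what that call returns on one-hot category columns (the column names here are hypothetical):

    import pandas as pd

    # One-hot genre flags -> a single categorical column via idxmax(axis=1),
    # as the exercise hint suggests.
    categories_df = pd.DataFrame({
        'Action': [1, 0, 0],
        'Comedy': [0, 1, 0],
        'Drama':  [0, 0, 1],
    })
    single_column = categories_df.idxmax(axis=1)  # pd.Series of column names
    print(single_column.tolist())                 # ['Action', 'Comedy', 'Drama']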
vanderschaarlab/MIRACLE | [
"ec28f5051d604a3134f9379b9a63a6cc379f2bc5"
] | [
"miracle/third_party/imputation_gain.py"
] | [
"# stdlib\nfrom typing import Tuple, Union\n\n# third party\nimport numpy as np\nfrom sklearn.base import TransformerMixin\n\n# Necessary packages\nimport torch\nfrom torch import nn\n\nEPS = 1e-8\n\nDEVICE = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n\ndef sample_Z(m: int, n: int) -> np.ndarray:\n \"\"\"Random sample generator for Z.\n\n Args:\n m: number of rows\n n: number of columns\n\n Returns:\n np.ndarray: generated random values\n \"\"\"\n res = np.random.uniform(0.0, 0.01, size=[m, n])\n return torch.from_numpy(res).to(DEVICE)\n\n\ndef sample_M(m: int, n: int, p: float) -> np.ndarray:\n \"\"\"Hint Vector Generation\n\n Args:\n m: number of rows\n n: number of columns\n p: hint rate\n\n Returns:\n np.ndarray: generated random values\n \"\"\"\n unif_prob = np.random.uniform(0.0, 1.0, size=[m, n])\n M = unif_prob > p\n M = 1.0 * M\n\n return torch.from_numpy(M).to(DEVICE)\n\n\ndef sample_idx(m: int, n: int) -> np.ndarray:\n \"\"\"Mini-batch generation\n\n Args:\n m: number of rows\n n: number of columns\n\n Returns:\n np.ndarray: generated random indices\n \"\"\"\n idx = np.random.permutation(m)\n idx = idx[:n]\n return idx\n\n\nclass GainModel:\n \"\"\"The core model for GAIN Imputation.\n\n Args:\n dim: float\n Number of features.\n h_dim: float\n Size of the hidden layer.\n loss_alpha: int\n Hyperparameter for the generator loss.\n \"\"\"\n\n def __init__(\n self,\n dim: int,\n h_dim: int,\n loss_alpha: float = 10,\n ) -> None:\n self.generator_layer = nn.Sequential(\n nn.Linear(dim * 2, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, dim),\n nn.Sigmoid(),\n ).to(DEVICE)\n self.discriminator_layer = nn.Sequential(\n nn.Linear(dim * 2, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, h_dim),\n nn.ReLU(),\n nn.Linear(h_dim, dim),\n nn.Sigmoid(),\n ).to(DEVICE)\n self.loss_alpha = loss_alpha\n\n def discriminator(self, X: torch.Tensor, hints: torch.Tensor) -> torch.Tensor:\n inputs = torch.cat([X, hints], dim=1).float()\n return self.discriminator_layer(inputs)\n\n def generator(self, X: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:\n inputs = torch.cat([X, mask], dim=1).float()\n return self.generator_layer(inputs)\n\n def discr_loss(\n self, X: torch.Tensor, M: torch.Tensor, H: torch.Tensor\n ) -> torch.Tensor:\n G_sample = self.generator(X, M)\n X_hat = X * M + G_sample * (1 - M)\n D_prob = self.discriminator(X_hat, H)\n return -torch.mean(\n M * torch.log(D_prob + EPS) + (1 - M) * torch.log(1.0 - D_prob + EPS)\n )\n\n def gen_loss(\n self, X: torch.Tensor, M: torch.Tensor, H: torch.Tensor\n ) -> torch.Tensor:\n G_sample = self.generator(X, M)\n X_hat = X * M + G_sample * (1 - M)\n D_prob = self.discriminator(X_hat, H)\n\n G_loss1 = -torch.mean((1 - M) * torch.log(D_prob + EPS))\n MSE_train_loss = torch.mean((M * X - M * G_sample) ** 2) / torch.mean(M)\n\n return G_loss1 + self.loss_alpha * MSE_train_loss\n\n\nclass GainImputation(TransformerMixin):\n \"\"\"GAIN Imputation for static data using Generative Adversarial Nets.\n The training steps are:\n - The generato imputes the missing components conditioned on what is actually observed, and outputs a completed vector.\n - The discriminator takes a completed vector and attempts to determine which components were actually observed and which were imputed.\n\n Original Paper: J. Yoon, J. Jordon, M. 
van der Schaar, \"GAIN: Missing Data Imputation using Generative Adversarial Nets,\" ICML, 2018.\n\n\n Args:\n batch_size: int\n The batch size for the training steps.\n iterations: int\n Number of epochs for training.\n hint_rate: float\n Percentage of additional information for the discriminator.\n loss_alpha: int\n Hyperparameter for the generator loss.\n \"\"\"\n\n def __init__(\n self,\n batch_size: int = 128,\n iterations: int = 10000,\n hint_rate: float = 0.9,\n loss_alpha: float = 10,\n ) -> None:\n self.batch_size = batch_size\n self.iterations = iterations\n self.hint_rate = hint_rate\n self.loss_alpha = loss_alpha\n self.norm_parameters: Union[dict, None] = None\n self.model: Union[GainModel, None] = None\n\n def fit(self, X: torch.Tensor) -> \"GainImputation\":\n \"\"\"Train the GAIN model.\n\n Args:\n X: incomplete dataset.\n\n Returns:\n self: the updated model.\n \"\"\"\n X = X.clone()\n\n # Parameters\n no = len(X)\n dim = len(X[0, :])\n\n # Hidden state dimensions\n h_dim = dim\n\n # MinMaxScaler normalization\n min_val = np.zeros(dim)\n max_val = np.zeros(dim)\n\n X = X.cpu()\n\n for i in range(dim):\n min_val[i] = np.nanmin(X[:, i])\n X[:, i] = X[:, i] - np.nanmin(X[:, i])\n max_val[i] = np.nanmax(X[:, i])\n X[:, i] = X[:, i] / (np.nanmax(X[:, i]) + EPS)\n\n # Set missing\n mask = 1 - (1 * (np.isnan(X)))\n mask = mask.float().to(DEVICE)\n\n X = torch.nan_to_num(X)\n X = X.to(DEVICE)\n\n self.model = GainModel(dim, h_dim)\n\n D_solver = torch.optim.Adam(self.model.discriminator_layer.parameters())\n G_solver = torch.optim.Adam(self.model.generator_layer.parameters())\n\n def sample() -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n mb_size = min(self.batch_size, no)\n\n mb_idx = sample_idx(no, mb_size)\n x_mb = X[mb_idx, :].clone()\n m_mb = mask[mb_idx, :].clone()\n\n z_mb = sample_Z(mb_size, dim)\n h_mb = sample_M(mb_size, dim, 1 - self.hint_rate)\n h_mb = m_mb * h_mb\n\n x_mb = m_mb * x_mb + (1 - m_mb) * z_mb\n\n return x_mb, h_mb, m_mb\n\n for it in range(self.iterations):\n D_solver.zero_grad()\n\n x_mb, h_mb, m_mb = sample()\n\n D_loss = self.model.discr_loss(x_mb, m_mb, h_mb)\n D_loss.backward()\n D_solver.step()\n\n G_solver.zero_grad()\n x_mb, h_mb, m_mb = sample()\n G_loss = self.model.gen_loss(x_mb, m_mb, h_mb)\n G_loss.backward()\n G_solver.step()\n\n self.norm_parameters = {\"min\": min_val, \"max\": max_val}\n\n return self\n\n def transform(self, Xmiss: np.ndarray) -> np.ndarray:\n \"\"\"Return imputed data by trained GAIN model.\n\n Args:\n Xmiss: the array with missing data\n\n Returns:\n torch.Tensor: the array without missing data\n\n Raises:\n RuntimeError: if the result contains np.nans.\n \"\"\"\n Xmiss = torch.tensor(np.asarray(Xmiss)).to(DEVICE)\n if self.norm_parameters is None or self.model is None:\n raise RuntimeError(\"fit the model first\")\n\n X = Xmiss.clone()\n\n min_val = self.norm_parameters[\"min\"]\n max_val = self.norm_parameters[\"max\"]\n\n no, dim = X.shape\n\n X = X.cpu()\n # MinMaxScaler normalization\n for i in range(dim):\n X[:, i] = X[:, i] - min_val[i]\n X[:, i] = X[:, i] / (max_val[i] + EPS)\n\n # Set missing\n mask = 1 - (1 * (np.isnan(X)))\n mask = mask.float().to(DEVICE)\n\n x = torch.nan_to_num(X)\n x = x.to(DEVICE)\n\n # Imputed data\n z = sample_Z(no, dim)\n x = mask * x + (1 - mask) * z\n\n imputed_data = self.model.generator(x, mask)\n\n # Renormalize\n for i in range(dim):\n imputed_data[:, i] = imputed_data[:, i] * (max_val[i] + EPS)\n imputed_data[:, i] = imputed_data[:, i] + min_val[i]\n\n if 
np.all(np.isnan(imputed_data.detach().cpu().numpy())):\n err = \"The imputed result contains nan. This is a bug. Please report it on the issue tracker.\"\n raise RuntimeError(err)\n\n mask = mask.cpu()\n imputed_data = imputed_data.detach().cpu()\n\n return mask * np.nan_to_num(Xmiss.cpu()) + (1 - mask) * imputed_data\n\n def fit_transform(self, X: np.ndarray) -> np.ndarray:\n \"\"\"Imputes the provided dataset using the GAIN strategy.\n\n Args:\n X: np.ndarray\n A dataset with missing values.\n\n Returns:\n Xhat: The imputed dataset.\n \"\"\"\n X = torch.tensor(np.asarray(X)).cpu()\n return self.fit(X).transform(X).detach().cpu().numpy()\n"
] | [
[
"numpy.nanmax",
"torch.mean",
"torch.cat",
"numpy.isnan",
"numpy.asarray",
"numpy.nanmin",
"torch.nan_to_num",
"torch.from_numpy",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"numpy.random.permutation",
"torch.log",
"torch.cuda.is_available",
"numpy.random.uniform",
"torch.nn.ReLU",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
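A minimal usage sketch for the GainImputation transformer defined above. The import path is assumed from the row's file_path, and the iteration count is lowered from the default 10000 purely to keep a toy run fast:

    import numpy as np
    from miracle.third_party.imputation_gain import GainImputation  # path assumed

    rng = np.random.default_rng(0)
    X = rng.random((64, 4))
    X[rng.random(X.shape) < 0.2] = np.nan      # knock out ~20% of the entries

    imputer = GainImputation(batch_size=32, iterations=200)
    X_hat = imputer.fit_transform(X)           # observed entries kept, NaNs imputed
    print(np.isnan(X_hat).any())               # False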
zpreisler/tensorflow | [
"f2b17b22e12bd743b66945070f338f70b5fa3332"
] | [
"tensorflow/contrib/distribute/python/metrics_v1_test.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for V1 metrics.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom absl.testing import parameterized\n\nfrom tensorflow.contrib.distribute.python import combinations\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.eager import test\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import metrics\nfrom tensorflow.python.ops import variables\n\n\ndef _labeled_dataset_fn():\n # First four batches of x: labels, predictions -> (labels == predictions)\n # 0: 0, 0 -> True; 1: 1, 1 -> True; 2: 2, 2 -> True; 3: 3, 0 -> False\n # 4: 4, 1 -> False; 5: 0, 2 -> False; 6: 1, 0 -> False; 7: 2, 1 -> False\n # 8: 3, 2 -> False; 9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False\n # 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True\n return dataset_ops.Dataset.range(1000).map(\n lambda x: {\"labels\": x % 5, \"predictions\": x % 3}).batch(4)\n\n\ndef _boolean_dataset_fn():\n # First four batches of labels, predictions: {TP, FP, TN, FN}\n # with a threshold of 0.5:\n # T, T -> TP; F, T -> FP; T, F -> FN\n # F, F -> TN; T, T -> TP; F, T -> FP\n # T, F -> FN; F, F -> TN; T, T -> TP\n # F, T -> FP; T, F -> FN; F, F -> TN\n return dataset_ops.Dataset.from_tensor_slices({\n \"labels\": [True, False, True, False],\n \"predictions\": [True, True, False, False]}).repeat().batch(3)\n\n\ndef _threshold_dataset_fn():\n # First four batches of labels, predictions: {TP, FP, TN, FN}\n # with a threshold of 0.5:\n # True, 1.0 -> TP; False, .75 -> FP; True, .25 -> FN\n # False, 0.0 -> TN; True, 1.0 -> TP; False, .75 -> FP\n # True, .25 -> FN; False, 0.0 -> TN; True, 1.0 -> TP\n # False, .75 -> FP; True, .25 -> FN; False, 0.0 -> TN\n return dataset_ops.Dataset.from_tensor_slices({\n \"labels\": [True, False, True, False],\n \"predictions\": [1.0, 0.75, 0.25, 0.]}).repeat().batch(3)\n\n\ndef _regression_dataset_fn():\n return dataset_ops.Dataset.from_tensor_slices({\n \"labels\": [1., .5, 1., 0.],\n \"predictions\": [1., .75, .25, 0.]}).repeat()\n\n\n# TODO(priyag): Add TPU Strategy to this once metrics aggregate correctly using\n# TowerLocalVariables on TPUs. 
Submit http://cl/208914352.\ndef all_combinations():\n return combinations.combine(\n distribution=[combinations.default_strategy,\n combinations.one_device_strategy,\n combinations.mirrored_strategy_with_gpu_and_cpu,\n combinations.mirrored_strategy_with_two_gpus],\n mode=[\"graph\"])\n\n\n# TODO(josh11b): Test metrics.recall_at_top_k, metrics.average_precision_at_k,\n# metrics.precision_at_k\nclass MetricsV1Test(test.TestCase, parameterized.TestCase):\n\n def _test_metric(self, distribution, dataset_fn, metric_fn, expected_fn):\n with ops.Graph().as_default(), distribution.scope():\n iterator = distribution.distribute_dataset(\n dataset_fn).make_initializable_iterator()\n value, update = distribution.call_for_each_tower(\n metric_fn, iterator.get_next())\n update = distribution.group(update)\n self.evaluate(iterator.initializer)\n self.evaluate(variables.local_variables_initializer())\n # TODO(josh11b): Once we switch to using a global batch size for input,\n # replace \"distribution.num_towers\" with \"1\".\n batches_per_update = distribution.num_towers\n\n # Update variables using the first `num_towers` batches.\n self.evaluate(update)\n self.assertAllClose(expected_fn(batches_per_update), self.evaluate(value),\n 0.001, msg=\"After first update\")\n\n # Update variables using the second `num_towers` batches.\n self.evaluate(update)\n self.assertAllClose(expected_fn(2 * batches_per_update),\n self.evaluate(value),\n 0.001,\n msg=\"After second update\")\n\n if batches_per_update == 1: # Consume 4 input batches\n self.evaluate(update)\n self.assertAllClose(expected_fn(3 * batches_per_update),\n self.evaluate(value),\n 0.001,\n msg=\"After third update\")\n self.evaluate(update)\n self.assertAllClose(expected_fn(4 * batches_per_update),\n self.evaluate(value),\n 0.001,\n msg=\"After fourth update\")\n\n @combinations.generate(all_combinations())\n def testMean(self, distribution):\n def _dataset_fn():\n return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(4)\n\n def _expected_fn(num_batches):\n # Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.\n return num_batches * 2 - 0.5\n\n self._test_metric(distribution, _dataset_fn, metrics.mean, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testAccuracy(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.accuracy(labels, predictions)\n\n def _expected_fn(num_batches):\n return [3./4, 3./8, 3./12, 4./16][num_batches - 1]\n\n self._test_metric(\n distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanPerClassAccuracy(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.mean_per_class_accuracy(\n labels, predictions, num_classes=5)\n\n def _expected_fn(num_batches):\n mean = lambda x: sum(x) / len(x)\n return [mean([1., 1., 1., 0., 0.]),\n mean([0.5, 0.5, 0.5, 0., 0.]),\n mean([1./3, 1./3, 0.5, 0., 0.]),\n mean([0.5, 1./3, 1./3, 0., 0.])][num_batches - 1]\n\n self._test_metric(\n distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanIOU(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.mean_iou(\n labels, predictions, num_classes=5)\n\n def _expected_fn(num_batches):\n mean = lambda x: sum(x) / len(x)\n return [mean([1./2, 1./1, 1./1, 0.]), # no class 4 in first batch\n 
mean([1./4, 1./4, 1./3, 0., 0.]),\n mean([1./6, 1./6, 1./5, 0., 0.]),\n mean([2./8, 1./7, 1./7, 0., 0.])][num_batches - 1]\n\n self._test_metric(\n distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanTensor(self, distribution):\n def _dataset_fn():\n dataset = dataset_ops.Dataset.range(1000).map(math_ops.to_float)\n # Want to produce a fixed, known shape, so drop remainder when batching.\n dataset = dataset.batch(4, drop_remainder=True)\n return dataset\n\n def _expected_fn(num_batches):\n # Mean(0, 4, ..., 4 * num_batches - 4) == 2 * num_batches - 2\n # Mean(1, 5, ..., 4 * num_batches - 3) == 2 * num_batches - 1\n # Mean(2, 6, ..., 4 * num_batches - 2) == 2 * num_batches\n # Mean(3, 7, ..., 4 * num_batches - 1) == 2 * num_batches + 1\n first = 2. * num_batches - 2.\n return [first, first + 1., first + 2., first + 3.]\n\n self._test_metric(\n distribution, _dataset_fn, metrics.mean_tensor, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testAUCROC(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.auc(labels, predictions, num_thresholds=8, curve=\"ROC\",\n summation_method=\"careful_interpolation\")\n\n def _expected_fn(num_batches):\n return [0.5, 7./9, 0.8, 0.75][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testAUCPR(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.auc(labels, predictions, num_thresholds=8, curve=\"PR\",\n summation_method=\"careful_interpolation\")\n\n def _expected_fn(num_batches):\n return [0.797267, 0.851238, 0.865411, 0.797267][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalseNegatives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_negatives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [1., 1., 2., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalseNegativesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_negatives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[1.], [1.], [2.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTrueNegatives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_negatives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0., 1., 2., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTrueNegativesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_negatives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[0.], [1.], [2.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, 
_expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalsePositives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_positives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [1., 2., 2., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testFalsePositivesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.false_positives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[1.], [2.], [2.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTruePositives(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_positives(labels, predictions)\n\n def _expected_fn(num_batches):\n return [1., 2., 3., 3.][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testTruePositivesAtThresholds(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.true_positives_at_thresholds(labels, predictions, [.5])\n\n def _expected_fn(num_batches):\n return [[1.], [2.], [3.], [3.]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testPrecision(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.precision(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0.5, 0.5, 0.6, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testPrecisionAtThreshold(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.precision_at_thresholds(labels, predictions, [0.5])\n\n def _expected_fn(num_batches):\n return [[0.5], [0.5], [0.6], [0.5]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testRecall(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.recall(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0.5, 2./3, 0.6, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testRecallAtThreshold(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.recall_at_thresholds(labels, predictions, [0.5])\n\n def _expected_fn(num_batches):\n return [[0.5], [2./3], [0.6], [0.5]][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testMeanSquaredError(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.mean_squared_error(labels, predictions)\n\n def _expected_fn(num_batches):\n return 
[0., 1./32, 0.208333, 0.15625][num_batches - 1]\n\n self._test_metric(\n distribution, _regression_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testRootMeanSquaredError(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.root_mean_squared_error(labels, predictions)\n\n def _expected_fn(num_batches):\n return [0., 0.176777, 0.456435, 0.395285][num_batches - 1]\n\n self._test_metric(\n distribution, _regression_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testSensitivityAtSpecificity(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.sensitivity_at_specificity(labels, predictions, 0.8)\n\n def _expected_fn(num_batches):\n return [0.5, 2./3, 0.6, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n @combinations.generate(all_combinations())\n def testSpecificityAtSensitivity(self, distribution):\n def _metric_fn(x):\n labels = x[\"labels\"]\n predictions = x[\"predictions\"]\n return metrics.specificity_at_sensitivity(labels, predictions, 0.95)\n\n def _expected_fn(num_batches):\n return [0., 1./3, 0.5, 0.5][num_batches - 1]\n\n self._test_metric(\n distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] | [
[
"tensorflow.python.ops.metrics.mean_iou",
"tensorflow.python.ops.metrics.recall_at_thresholds",
"tensorflow.python.ops.metrics.false_positives",
"tensorflow.python.data.ops.dataset_ops.Dataset.range",
"tensorflow.python.ops.metrics.true_negatives",
"tensorflow.python.ops.metrics.precision",
"tensorflow.python.ops.metrics.precision_at_thresholds",
"tensorflow.python.ops.metrics.true_negatives_at_thresholds",
"tensorflow.python.ops.metrics.root_mean_squared_error",
"tensorflow.python.ops.metrics.false_negatives_at_thresholds",
"tensorflow.python.ops.metrics.mean_per_class_accuracy",
"tensorflow.python.ops.metrics.specificity_at_sensitivity",
"tensorflow.contrib.distribute.python.combinations.combine",
"tensorflow.python.ops.metrics.mean_squared_error",
"tensorflow.python.ops.metrics.true_positives_at_thresholds",
"tensorflow.python.ops.metrics.auc",
"tensorflow.python.ops.metrics.recall",
"tensorflow.python.ops.metrics.false_negatives",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.eager.test.main",
"tensorflow.python.ops.metrics.false_positives_at_thresholds",
"tensorflow.python.ops.metrics.accuracy",
"tensorflow.python.ops.metrics.true_positives",
"tensorflow.python.ops.metrics.sensitivity_at_specificity",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.ops.variables.local_variables_initializer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"1.4",
"2.6",
"1.13",
"2.3",
"2.4",
"2.2",
"2.9",
"1.5",
"1.7",
"2.5",
"2.8",
"2.10"
]
}
] |
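The test file in the record above drives TensorFlow's streaming metric ops batch by batch, so each expected value is a cumulative count or ratio over all batches seen so far. A minimal numpy sketch of that accumulation, using hypothetical batches (the record does not include the real _boolean_dataset_fn, so these arrays are illustrative only):

import numpy as np

# Hypothetical boolean batches standing in for _boolean_dataset_fn's output.
label_batches = [np.array([1, 0, 1, 0]), np.array([0, 1, 1, 1])]
pred_batches = [np.array([1, 1, 0, 0]), np.array([0, 1, 1, 0])]

tp = fp = fn = 0
for i, (y, p) in enumerate(zip(label_batches, pred_batches), start=1):
    # Streaming metrics keep running totals across batches.
    tp += int(np.sum((y == 1) & (p == 1)))
    fp += int(np.sum((y == 0) & (p == 1)))
    fn += int(np.sum((y == 1) & (p == 0)))
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    print(f"after batch {i}: tp={tp} fp={fp} "
          f"precision={precision:.3f} recall={recall:.3f}")

This is why each _expected_fn above indexes a list by num_batches - 1: the metric's value changes as more batches flow through it.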
keflavich/sedfitter | [
"ec8722ec423ac684e4930fe23a98cd7b2d5b9f50"
] | [
"sedfitter/models.py"
] | [
"from __future__ import print_function, division\n\nimport os\n\nimport numpy as np\nfrom astropy.table import Table\nfrom astropy import units as u\n\nfrom .convolved_fluxes import ConvolvedFluxes, MonochromaticFluxes\nfrom . import fitting_routines as f\nfrom .utils import parfile\nfrom .utils.validator import validate_array\nfrom .fit_info import FitInfo\nfrom .filter import Filter\nfrom . import six\n\n\nclass Models(object):\n\n def __init__(self):\n\n self.names = None\n self.fluxes = None\n self.distances = None\n self.apertures = None\n self.logd = None\n self.wavelengths = None\n self.distances = None\n self.extended = []\n\n @property\n def wavelengths(self):\n \"\"\"\n The wavelengths at which the models are defined\n \"\"\"\n return self._wavelengths\n\n @wavelengths.setter\n def wavelengths(self, value):\n if value is None:\n self._wavelengths = None\n else:\n self._wavelengths = validate_array('wavelengths', value, domain='positive', ndim=1, physical_type='length')\n\n @property\n def distances(self):\n \"\"\"\n The distances at which the models are defined\n \"\"\"\n return self._distances\n\n @distances.setter\n def distances(self, value):\n if value is None:\n self._distances = None\n else:\n self._distances = validate_array('distances', value, domain='positive', ndim=1, physical_type='length')\n\n @property\n def apertures(self):\n \"\"\"\n The apertures at which the fluxes are defined\n \"\"\"\n return self._apertures\n\n @apertures.setter\n def apertures(self, value):\n if value is None:\n self._apertures = None\n else:\n self._apertures = validate_array('apertures', value, domain='positive', ndim=1, physical_type='length')\n\n @property\n def fluxes(self):\n \"\"\"\n The model fluxes\n \"\"\"\n return self._fluxes\n\n @fluxes.setter\n def fluxes(self, value):\n if value is None:\n self._fluxes = value\n else:\n if self.n_distances is None:\n self._fluxes = validate_array('fluxes', value, ndim=2,\n shape=(self.n_models, self.n_wav),\n physical_type=('power', 'flux', 'spectral flux density'))\n else:\n self._fluxes = validate_array('fluxes', value, ndim=3,\n shape=(self.n_models, self.n_distances, self.n_wav),\n physical_type=('power', 'flux', 'spectral flux density'))\n\n @property\n def n_ap(self):\n if self.apertures is None:\n return 1\n else:\n return len(self.apertures)\n\n @property\n def n_wav(self):\n if self.wavelengths is None:\n return None\n else:\n return len(self.wavelengths)\n\n @property\n def n_distances(self):\n if self.distances is None:\n return None\n else:\n return len(self.distances)\n\n @property\n def n_models(self):\n if self.names is None:\n return None\n else:\n return len(self.names)\n\n @property\n def valid(self):\n if self.fluxes is None:\n return None\n else:\n return self.fluxes != 0\n\n @property\n def log_fluxes_mJy(self):\n values = np.zeros(self.fluxes.shape)\n values[~self.valid] = -np.inf\n values[self.valid] = np.log10(self.fluxes[self.valid].to(u.mJy).value)\n return values\n\n @classmethod\n def read(cls, directory, filters, distance_range=None, remove_resolved=False):\n modpar = parfile.read(\"%s/models.conf\" % directory, 'conf')\n if modpar.get('version', 1) == 1:\n return cls._read_version_1(directory, filters,\n distance_range=distance_range,\n remove_resolved=remove_resolved)\n else:\n return cls._read_version_2(directory, filters,\n distance_range=distance_range,\n remove_resolved=remove_resolved)\n\n @classmethod\n def _read_version_1(cls, directory, filters, distance_range=None, remove_resolved=None):\n\n m = cls()\n\n 
# Read in model parameters\n modpar = parfile.read(\"%s/models.conf\" % directory, 'conf')\n\n print(\" ------------------------------------------------------------\")\n print(\" => Model parameters\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n print(\" Models : %s\" % modpar['name'])\n print(\" Log[d] stepping : %g\" % modpar['logd_step'])\n\n if modpar['aperture_dependent']:\n\n distance_range_kpc = distance_range.to(u.kpc).value\n\n if distance_range:\n if distance_range_kpc[0] == distance_range_kpc[1]:\n n_distances = 1\n m.distances = np.array([distance_range_kpc[0]]) * u.kpc\n else:\n n_distances = 1 + (np.log10(distance_range_kpc[1]) - np.log10(distance_range_kpc[0])) / modpar['logd_step']\n m.distances = np.logspace(np.log10(distance_range_kpc[0]), np.log10(distance_range_kpc[1]), n_distances) * u.kpc\n print(\" Number of distances : %i\" % m.n_distances)\n else:\n raise Exception(\"For aperture-dependent models, a distange range is required\")\n\n print(\"\")\n print(\" ------------------------------------------------------------\")\n print(\" => Reading in convolved fluxes\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n\n m.wavelengths = np.zeros(len(filters)) * u.micron\n\n for ifilt, filt in enumerate(filters):\n\n filename = '%s/convolved/%s.fits' % (directory, filt['name'])\n\n if not os.path.exists(filename):\n if os.path.exists(filename + '.gz'):\n filename += '.gz'\n else:\n raise Exception(\"File not found: \" + filename)\n\n print(\" Reading \" + filename)\n\n conv = ConvolvedFluxes.read(filename)\n\n if ifilt == 0:\n if m.n_distances is None:\n model_fluxes = np.zeros((conv.n_models, len(filters))) * u.mJy\n extended = None\n else:\n model_fluxes = np.zeros((conv.n_models, m.n_distances, len(filters))) * u.mJy\n extended = np.zeros((conv.n_models, m.n_distances, len(filters)), dtype=bool)\n\n m.wavelengths[ifilt] = conv.central_wavelength\n\n if m.n_distances is not None:\n apertures_au = filt['aperture_arcsec'] * m.distances.to(u.pc).value * u.au\n conv = conv.interpolate(apertures_au)\n conv.flux = conv.flux * (u.kpc / m.distances) ** 2\n m.logd = np.log10(m.distances.to(u.kpc).value)\n if remove_resolved:\n extended[:, :, ifilt] = apertures_au[np.newaxis,:] < conv.find_radius_sigma(0.5)[:, np.newaxis]\n model_fluxes[:, :, ifilt] = conv.flux\n else:\n model_fluxes[:, ifilt] = conv.flux[:, 0]\n\n try:\n m.names = np.char.strip(conv.model_names)\n except:\n m.names = np.array([x.strip() for x in conv.model_names], dtype=conv.model_names.dtype)\n\n m.fluxes = model_fluxes\n\n if extended is not None:\n m.extended = extended\n\n return m\n\n @classmethod\n def _read_version_2(cls, directory, filters, distance_range=None, remove_resolved=None):\n\n m = cls()\n\n # Read in model parameters\n modpar = parfile.read(\"%s/models.conf\" % directory, 'conf')\n\n print(\" ------------------------------------------------------------\")\n print(\" => Model parameters\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n print(\" Models : %s\" % modpar['name'])\n print(\" Log[d] stepping : %g\" % modpar['logd_step'])\n\n if modpar['aperture_dependent']:\n\n distance_range_kpc = distance_range.to(u.kpc).value\n\n if distance_range:\n if distance_range_kpc[0] == distance_range_kpc[1]:\n n_distances = 1\n m.distances = np.array([distance_range_kpc[0]]) * u.kpc\n else:\n n_distances = 1 + (np.log10(distance_range_kpc[1]) - np.log10(distance_range_kpc[0])) 
/ modpar['logd_step']\n m.distances = np.logspace(np.log10(distance_range_kpc[0]), np.log10(distance_range_kpc[1]), n_distances) * u.kpc\n print(\" Number of distances : %i\" % m.n_distances)\n else:\n raise Exception(\"For aperture-dependent models, a distange range is required\")\n\n print(\"\")\n print(\" ------------------------------------------------------------\")\n print(\" => Reading in convolved fluxes\")\n print(\" ------------------------------------------------------------\")\n print(\"\")\n\n # Start off by reading in main flux cube\n from .sed.cube import SEDCube\n cube = SEDCube.read(os.path.join(directory, 'flux.fits'))\n\n # Initialize model flux array and array to indicate whether models are\n # extended\n if m.n_distances is None:\n model_fluxes = np.zeros((cube.n_models, len(filters))) * u.mJy\n extended = None\n else:\n model_fluxes = np.zeros((cube.n_models, m.n_distances, len(filters))) * u.mJy\n extended = np.zeros((cube.n_models, m.n_distances, len(filters)), dtype=bool)\n\n # Define empty wavelength array\n m.wavelengths = np.zeros(len(filters)) * u.micron\n\n for ifilt, filt in enumerate(filters):\n\n if 'name' in filt:\n\n filename = '%s/convolved/%s.fits' % (directory, filt['name'])\n\n if not os.path.exists(filename):\n if os.path.exists(filename + '.gz'):\n filename += '.gz'\n else:\n raise Exception(\"File not found: \" + filename)\n\n print(\" Reading \" + filename)\n\n conv = ConvolvedFluxes.read(filename)\n\n m.wavelengths[ifilt] = conv.central_wavelength\n\n elif 'wav' in filt:\n\n # Find wavelength index\n wavelength_index = np.argmin(np.abs(cube.wav - filt['wav']))\n\n print(\" Reading fluxes at {0}\".format(filt['wav']))\n\n conv = MonochromaticFluxes.from_sed_cube(cube, wavelength_index)\n\n m.wavelengths[ifilt] = filt['wav']\n\n if m.n_distances is not None:\n apertures_au = filt['aperture_arcsec'] * m.distances.to(u.pc).value * u.au\n conv = conv.interpolate(apertures_au)\n conv.flux = conv.flux * (u.kpc / m.distances) ** 2\n m.logd = np.log10(m.distances.to(u.kpc).value)\n # TODO: rather than compute the radius for each model, just\n # check directly the condition.\n if remove_resolved:\n extended[:, :, ifilt] = apertures_au[np.newaxis,:] < conv.find_radius_sigma(0.5)[:, np.newaxis]\n model_fluxes[:, :, ifilt] = conv.flux\n else:\n model_fluxes[:, ifilt] = conv.flux[:, 0]\n\n try:\n m.names = np.char.strip(conv.model_names)\n except:\n m.names = np.array([x.strip() for x in conv.model_names], dtype=conv.model_names.dtype)\n\n m.fluxes = model_fluxes\n\n if extended is not None:\n m.extended = extended\n\n return m\n\n def fit(self, source, av_law, sc_law, av_min, av_max, output_convolved=False):\n\n weight, log_flux, log_error = source.get_log_fluxes()\n\n model_fluxes = self.log_fluxes_mJy\n\n if model_fluxes.ndim == 2: # Aperture-independent fitting\n\n # Use 2-parameter linear regression to find the best-fit av and scale for each model\n residual = log_flux - model_fluxes\n av_best, sc_best = f.linear_regression(residual, weight, av_law, sc_law)\n\n # Use optimal scaling for Avs that are outside range\n reset1 = (av_best < av_min)\n reset2 = (av_best > av_max)\n av_best[reset1] = av_min\n av_best[reset2] = av_max\n reset = reset1 | reset2\n sc_best[reset] = f.optimal_scaling(residual[reset] - av_best[reset][:, np.newaxis] * av_law[np.newaxis, :], weight, sc_law)\n\n # Compute best-fit model in each case\n model = av_best[:, np.newaxis] * av_law[np.newaxis, :] + sc_best[:, np.newaxis] * sc_law[np.newaxis,:]\n\n # Calculate the chi-squared 
value\n ch_best = f.chi_squared(source.valid, residual, log_error, weight, model)\n\n # Extract convolved model fluxes for best-fit\n model_fluxes = model + model_fluxes\n\n elif model_fluxes.ndim == 3: # Aperture dependent fitting\n\n # Use optimal scaling to fit the Av\n residual = log_flux - model_fluxes\n av_best = f.optimal_scaling(residual, weight, av_law)\n\n # Reset to valid range\n av_best[av_best < av_min] = av_min\n av_best[av_best > av_max] = av_max\n\n # Compute best-fit model in each case\n model = av_best[:, :, np.newaxis] * av_law[np.newaxis, np.newaxis,:]\n\n # Calculate the chi-squared value\n ch_best = f.chi_squared(source.valid, residual, log_error, weight, model)\n\n # Remove extended objects\n if type(self.extended) == np.ndarray:\n reset = np.any(self.extended[:, :, source.valid > 0], axis=2)\n ch_best[reset] = np.inf\n\n # Find best-fit distance in each case\n best = np.argmin(ch_best, axis=1)\n\n sc_best = self.logd[best]\n\n ch_best = ch_best[np.arange(self.n_models), best]\n av_best = av_best[np.arange(self.n_models), best]\n\n # Extract convolved model fluxes for best-fit\n model_fluxes = (model + model_fluxes)[np.arange(self.n_models), best, :]\n\n else:\n\n raise Exception(\"Unexpected number of dimensions in flux array\")\n\n info = FitInfo()\n info.source = source\n info.av = av_best\n info.sc = sc_best\n info.chi2 = ch_best\n info.model_name = self.names\n info.model_fluxes = model_fluxes\n info.sort()\n\n return info\n\n\ndef load_parameter_table(model_dir):\n\n if os.path.exists(model_dir + '/parameters.fits'):\n t = Table.read(model_dir + '/parameters.fits')\n elif os.path.exists(model_dir + '/parameters.fits.gz'):\n t = Table.read(model_dir + '/parameters.fits.gz')\n else:\n raise Exception(\"Parameter file not found in %s\" % model_dir)\n\n return t\n"
] | [
[
"numpy.abs",
"numpy.arange",
"numpy.char.strip",
"numpy.log10",
"numpy.argmin",
"numpy.any",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
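Both _read_version_1 and _read_version_2 in the record above derive a log-spaced distance grid from logd_step and then rescale fluxes tabulated at 1 kpc by (1 kpc / d)**2. A minimal sketch of that grid construction, assuming an explicit int(round(...)) for the point count (the code as dumped passes a float count straight to np.logspace):

import numpy as np

def distance_grid_kpc(d_min_kpc, d_max_kpc, logd_step):
    # One grid point per logd_step in log10(d/kpc), endpoints included.
    if d_min_kpc == d_max_kpc:
        return np.array([d_min_kpc])
    # The int(round(...)) is an assumption added here; np.logspace needs
    # an integer number of samples.
    n = 1 + int(round((np.log10(d_max_kpc) - np.log10(d_min_kpc)) / logd_step))
    return np.logspace(np.log10(d_min_kpc), np.log10(d_max_kpc), n)

distances = distance_grid_kpc(0.1, 10.0, 0.5)  # [0.1 ... 10] kpc, 5 points
flux_at_1kpc = 100.0  # mJy, hypothetical
# Inverse-square rescaling, mirroring `conv.flux * (u.kpc / m.distances) ** 2`.
print(distances)
print(flux_at_1kpc * (1.0 / distances) ** 2)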
Next-Trends/rasa | [
"c06dc26b3a57dd1114b60aebcc9ccd3bbb8308d7"
] | [
"rasa/nlu/featurizers/sparse_featurizer/regex_featurizer.py"
] | [
"from __future__ import annotations\nimport logging\nimport re\nfrom typing import Any, Dict, List, Optional, Text, Tuple, Type\nimport numpy as np\nimport scipy.sparse\nfrom rasa.nlu.tokenizers.tokenizer import Tokenizer\n\nimport rasa.shared.utils.io\nimport rasa.utils.io\nimport rasa.nlu.utils.pattern_utils as pattern_utils\nfrom rasa.engine.graph import ExecutionContext, GraphComponent\nfrom rasa.engine.recipes.default_recipe import DefaultV1Recipe\nfrom rasa.engine.storage.resource import Resource\nfrom rasa.engine.storage.storage import ModelStorage\nfrom rasa.nlu.constants import TOKENS_NAMES\nfrom rasa.nlu.featurizers.sparse_featurizer.sparse_featurizer import SparseFeaturizer\nfrom rasa.shared.nlu.constants import TEXT, RESPONSE, ACTION_TEXT\nfrom rasa.shared.nlu.training_data.training_data import TrainingData\nfrom rasa.shared.nlu.training_data.message import Message\n\nlogger = logging.getLogger(__name__)\n\n\[email protected](\n DefaultV1Recipe.ComponentType.MESSAGE_FEATURIZER, is_trainable=True\n)\nclass RegexFeaturizer(SparseFeaturizer, GraphComponent):\n \"\"\"Adds message features based on regex expressions.\"\"\"\n\n @classmethod\n def required_components(cls) -> List[Type]:\n \"\"\"Components that should be included in the pipeline before this component.\"\"\"\n return [Tokenizer]\n\n @staticmethod\n def get_default_config() -> Dict[Text, Any]:\n \"\"\"Returns the component's default config.\"\"\"\n return {\n **SparseFeaturizer.get_default_config(),\n # text will be processed with case sensitive as default\n \"case_sensitive\": True,\n # use lookup tables to generate features\n \"use_lookup_tables\": True,\n # use regexes to generate features\n \"use_regexes\": True,\n # use match word boundaries for lookup table\n \"use_word_boundaries\": True,\n }\n\n def __init__(\n self,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n known_patterns: Optional[List[Dict[Text, Text]]] = None,\n ) -> None:\n \"\"\"Constructs new features for regexes and lookup table using regex expressions.\n\n Args:\n config: Configuration for the component.\n model_storage: Storage which graph components can use to persist and load\n themselves.\n resource: Resource locator for this component which can be used to persist\n and load itself from the `model_storage`.\n execution_context: Information about the current graph run.\n known_patterns: Regex Patterns the component should pre-load itself with.\n \"\"\"\n super().__init__(execution_context.node_name, config)\n\n self._model_storage = model_storage\n self._resource = resource\n\n self.known_patterns = known_patterns if known_patterns else []\n self.case_sensitive = config[\"case_sensitive\"]\n self.finetune_mode = execution_context.is_finetuning\n\n @classmethod\n def create(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n ) -> RegexFeaturizer:\n \"\"\"Creates a new untrained component (see parent class for full docstring).\"\"\"\n return cls(config, model_storage, resource, execution_context)\n\n def _merge_new_patterns(self, new_patterns: List[Dict[Text, Text]]) -> None:\n \"\"\"Updates already known patterns with new patterns extracted from data.\n\n New patterns should always be added to the end of the existing\n patterns and the order of the existing patterns should not be disturbed.\n\n Args:\n new_patterns: Patterns extracted from training data and to be merged with\n known patterns.\n 
\"\"\"\n pattern_name_index_map = {\n pattern[\"name\"]: index for index, pattern in enumerate(self.known_patterns)\n }\n for extra_pattern in new_patterns:\n new_pattern_name = extra_pattern[\"name\"]\n\n # Some patterns may have just new examples added\n # to them. These do not count as additional pattern.\n if new_pattern_name in pattern_name_index_map:\n self.known_patterns[pattern_name_index_map[new_pattern_name]][\n \"pattern\"\n ] = extra_pattern[\"pattern\"]\n else:\n self.known_patterns.append(extra_pattern)\n\n def train(self, training_data: TrainingData) -> Resource:\n \"\"\"Trains the component with all patterns extracted from training data.\"\"\"\n patterns_from_data = pattern_utils.extract_patterns(\n training_data,\n use_lookup_tables=self._config[\"use_lookup_tables\"],\n use_regexes=self._config[\"use_regexes\"],\n use_word_boundaries=self._config[\"use_word_boundaries\"],\n )\n if self.finetune_mode:\n # Merge patterns extracted from data with known patterns\n self._merge_new_patterns(patterns_from_data)\n else:\n self.known_patterns = patterns_from_data\n\n self._persist()\n return self._resource\n\n def process_training_data(self, training_data: TrainingData) -> TrainingData:\n \"\"\"Processes the training examples (see parent class for full docstring).\"\"\"\n for example in training_data.training_examples:\n for attribute in [TEXT, RESPONSE, ACTION_TEXT]:\n self._text_features_with_regex(example, attribute)\n\n return training_data\n\n def process(self, messages: List[Message]) -> List[Message]:\n \"\"\"Featurizes all given messages in-place.\n\n Returns:\n the given list of messages which have been modified in-place\n \"\"\"\n for message in messages:\n self._text_features_with_regex(message, TEXT)\n\n return messages\n\n def _text_features_with_regex(self, message: Message, attribute: Text) -> None:\n \"\"\"Helper method to extract features and set them appropriately in the message.\n\n Args:\n message: Message to be featurized.\n attribute: Attribute of message to be featurized.\n \"\"\"\n if self.known_patterns:\n sequence_features, sentence_features = self._features_for_patterns(\n message, attribute\n )\n\n self.add_features_to_message(\n sequence_features, sentence_features, attribute, message\n )\n\n def _features_for_patterns(\n self, message: Message, attribute: Text\n ) -> Tuple[Optional[scipy.sparse.coo_matrix], Optional[scipy.sparse.coo_matrix]]:\n \"\"\"Checks which known patterns match the message.\n\n Given a sentence, returns a vector of {1,0} values indicating which\n regexes did match. Furthermore, if the\n message is tokenized, the function will mark all tokens with a dict\n relating the name of the regex to whether it was matched.\n\n Args:\n message: Message to be featurized.\n attribute: Attribute of message to be featurized.\n\n Returns:\n Token and sentence level features of message attribute.\n \"\"\"\n # Attribute not set (e.g. 
response not present)\n if not message.get(attribute):\n return None, None\n\n tokens = message.get(TOKENS_NAMES[attribute], [])\n\n if not tokens:\n # nothing to featurize\n return None, None\n\n flags = 0 # default flag\n if not self.case_sensitive:\n flags = re.IGNORECASE\n\n sequence_length = len(tokens)\n\n num_patterns = len(self.known_patterns)\n\n sequence_features = np.zeros([sequence_length, num_patterns])\n sentence_features = np.zeros([1, num_patterns])\n\n for pattern_index, pattern in enumerate(self.known_patterns):\n matches = re.finditer(\n pattern[\"pattern\"], message.get(attribute), flags=flags\n )\n matches = list(matches)\n\n for token_index, t in enumerate(tokens):\n patterns = t.get(\"pattern\", default={})\n patterns[pattern[\"name\"]] = False\n\n for match in matches:\n if t.start < match.end() and t.end > match.start():\n patterns[pattern[\"name\"]] = True\n sequence_features[token_index][pattern_index] = 1.0\n if attribute in [RESPONSE, TEXT, ACTION_TEXT]:\n # sentence vector should contain all patterns\n sentence_features[0][pattern_index] = 1.0\n\n t.set(\"pattern\", patterns)\n\n return (\n scipy.sparse.coo_matrix(sequence_features),\n scipy.sparse.coo_matrix(sentence_features),\n )\n\n @classmethod\n def load(\n cls,\n config: Dict[Text, Any],\n model_storage: ModelStorage,\n resource: Resource,\n execution_context: ExecutionContext,\n **kwargs: Any,\n ) -> RegexFeaturizer:\n \"\"\"Loads trained component (see parent class for full docstring).\"\"\"\n known_patterns = None\n\n try:\n with model_storage.read_from(resource) as model_dir:\n patterns_file_name = model_dir / \"patterns.pkl\"\n known_patterns = rasa.shared.utils.io.read_json_file(patterns_file_name)\n except (ValueError, FileNotFoundError):\n logger.warning(\n f\"Failed to load `{cls.__class__.__name__}` from model storage. \"\n f\"Resource '{resource.name}' doesn't exist.\"\n )\n\n return cls(\n config,\n model_storage,\n resource,\n execution_context,\n known_patterns=known_patterns,\n )\n\n def _persist(self) -> None:\n with self._model_storage.write_to(self._resource) as model_dir:\n regex_file = model_dir / \"patterns.pkl\"\n rasa.shared.utils.io.dump_obj_as_json_to_file(\n regex_file, self.known_patterns\n )\n\n @classmethod\n def validate_config(cls, config: Dict[Text, Any]) -> None:\n \"\"\"Validates that the component is configured properly.\"\"\"\n pass\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
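The featurizer in the record above marks a token for a pattern whenever the token's character span overlaps a regex match span at all, and ORs the same hits into a single sentence-level vector. A self-contained sketch of that overlap logic, using tuple stand-ins for rasa's Token objects and a hypothetical text and pattern:

import re
import numpy as np
import scipy.sparse

# (text, start, end) tuples stand in for rasa Token objects; data is hypothetical.
text = "call me at 0123-456"
tokens = [("call", 0, 4), ("me", 5, 7), ("at", 8, 10), ("0123-456", 11, 19)]
known_patterns = [{"name": "phone", "pattern": r"\d{4}-\d{3}"}]

sequence_features = np.zeros((len(tokens), len(known_patterns)))
sentence_features = np.zeros((1, len(known_patterns)))

for p_idx, pattern in enumerate(known_patterns):
    for match in re.finditer(pattern["pattern"], text):
        for t_idx, (_, start, end) in enumerate(tokens):
            # Same test as _features_for_patterns: any span overlap counts.
            if start < match.end() and end > match.start():
                sequence_features[t_idx, p_idx] = 1.0
                sentence_features[0, p_idx] = 1.0

# The featurizer returns both matrices as sparse COO, as here.
print(scipy.sparse.coo_matrix(sequence_features))

Only the last token overlaps the phone-number match, so only its row is set, while the sentence vector records that the pattern fired anywhere in the message.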
arnaudgelas/pytorch-lightning | [
"cc624358c8e396e966f9c51b3010f6a986047fc6"
] | [
"tests/models/test_restore.py"
] | [
"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport glob\nimport logging as log\nimport os\nimport pickle\nfrom copy import deepcopy\n\nimport cloudpickle\nimport pytest\nimport torch\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader\n\nimport tests.base.develop_pipelines as tpipes\nimport tests.base.develop_utils as tutils\nfrom pytorch_lightning import Callback, LightningModule, Trainer, seed_everything\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom tests.base import BoringModel, EvalModelTemplate, GenericEvalModelTemplate, TrialMNIST\n\n\nclass ModelTrainerPropertyParity(Callback):\n\n def _check_properties(self, trainer, pl_module):\n assert trainer.global_step == pl_module.global_step\n assert trainer.current_epoch == pl_module.current_epoch\n\n def on_train_start(self, trainer, pl_module):\n self._check_properties(trainer, pl_module)\n\n def on_train_batch_start(self, trainer, pl_module, *args, **kwargs):\n self._check_properties(trainer, pl_module)\n\n def on_train_batch_end(self, trainer, pl_module, *args, **kwargs):\n self._check_properties(trainer, pl_module)\n\n def on_epoch_end(self, trainer, pl_module):\n self._check_properties(trainer, pl_module)\n\n def on_train_end(self, trainer, pl_module):\n self._check_properties(trainer, pl_module)\n\n\[email protected](\"enable_pl_optimizer\", [False, True])\ndef test_model_properties_resume_from_checkpoint(enable_pl_optimizer, tmpdir):\n \"\"\" Test that properties like `current_epoch` and `global_step`\n in model and trainer are always the same. 
\"\"\"\n model = EvalModelTemplate()\n checkpoint_callback = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer_args = dict(\n default_root_dir=tmpdir,\n max_epochs=1,\n logger=False,\n enable_pl_optimizer=enable_pl_optimizer,\n callbacks=[checkpoint_callback, ModelTrainerPropertyParity()], # this performs the assertions\n )\n trainer = Trainer(**trainer_args)\n trainer.fit(model)\n\n trainer_args.update(max_epochs=2)\n trainer = Trainer(**trainer_args, resume_from_checkpoint=str(tmpdir / \"last.ckpt\"))\n trainer.fit(model)\n\n\ndef test_try_resume_from_non_existing_checkpoint(tmpdir):\n \"\"\" Test that trying to resume from non-existing `resume_from_checkpoint` fail without error.\"\"\"\n model = BoringModel()\n checkpoint_cb = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n logger=False,\n callbacks=[checkpoint_cb],\n limit_train_batches=0.1,\n limit_val_batches=0.1,\n )\n # Generate checkpoint `last.ckpt` with BoringModel\n trainer.fit(model)\n # `True` if resume/restore successfully else `False`\n assert trainer.checkpoint_connector.restore(str(tmpdir / \"last.ckpt\"), trainer.on_gpu)\n assert not trainer.checkpoint_connector.restore(str(tmpdir / \"last_non_existing.ckpt\"), trainer.on_gpu)\n\n\nclass CaptureCallbacksBeforeTraining(Callback):\n callbacks = []\n\n def on_train_start(self, trainer, pl_module):\n self.callbacks = deepcopy(trainer.callbacks)\n\n\[email protected](\"enable_pl_optimizer\", [False, True])\ndef test_callbacks_state_resume_from_checkpoint(enable_pl_optimizer, tmpdir):\n \"\"\" Test that resuming from a checkpoint restores callbacks that persist state. \"\"\"\n model = EvalModelTemplate()\n callback_capture = CaptureCallbacksBeforeTraining()\n\n def get_trainer_args():\n checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer_args = dict(\n default_root_dir=tmpdir,\n max_steps=1,\n logger=False,\n enable_pl_optimizer=enable_pl_optimizer,\n callbacks=[\n checkpoint,\n callback_capture,\n ]\n )\n assert checkpoint.best_model_path == \"\"\n assert checkpoint.best_model_score is None\n return trainer_args\n\n # initial training\n trainer = Trainer(**get_trainer_args())\n trainer.fit(model)\n callbacks_before_resume = deepcopy(trainer.callbacks)\n\n # resumed training\n trainer = Trainer(**get_trainer_args(), resume_from_checkpoint=str(tmpdir / \"last.ckpt\"))\n trainer.fit(model)\n\n assert len(callbacks_before_resume) == len(callback_capture.callbacks)\n\n for before, after in zip(callbacks_before_resume, callback_capture.callbacks):\n if isinstance(before, ModelCheckpoint):\n assert before.best_model_path == after.best_model_path\n assert before.best_model_score == after.best_model_score\n\n\[email protected](\"enable_pl_optimizer\", [False, True])\ndef test_callbacks_references_resume_from_checkpoint(enable_pl_optimizer, tmpdir):\n \"\"\" Test that resuming from a checkpoint sets references as expected. 
\"\"\"\n model = EvalModelTemplate()\n args = {'default_root_dir': tmpdir, 'max_steps': 1, 'logger': False, \"enable_pl_optimizer\": enable_pl_optimizer}\n\n # initial training\n checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n trainer = Trainer(**args, callbacks=[checkpoint])\n assert checkpoint is trainer.callbacks[0] is trainer.checkpoint_callback\n trainer.fit(model)\n\n # resumed training\n new_checkpoint = ModelCheckpoint(dirpath=tmpdir, monitor=\"early_stop_on\", save_last=True)\n # pass in a new checkpoint object, which should take\n # precedence over the one in the last.ckpt file\n trainer = Trainer(**args, callbacks=[new_checkpoint], resume_from_checkpoint=str(tmpdir / \"last.ckpt\"))\n assert checkpoint is not new_checkpoint\n assert new_checkpoint is trainer.callbacks[0] is trainer.checkpoint_callback\n trainer.fit(model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_running_test_pretrained_model_distrib_dp(tmpdir):\n \"\"\"Verify `test()` on pretrained model.\"\"\"\n tutils.set_random_master_port()\n\n model = EvalModelTemplate()\n\n # exp file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # exp file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[checkpoint],\n logger=logger,\n gpus=[0, 1],\n accelerator='dp',\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)\n\n # run test set\n new_trainer = Trainer(**trainer_options)\n results = new_trainer.test(pretrained_model)\n pretrained_model.cpu()\n\n # test we have good test accuracy\n acc = results[0]['test_acc']\n assert acc > 0.5, f\"Model failed to get expected {0.5} accuracy. 
test_acc = {acc}\"\n\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n tpipes.run_prediction(dataloader, pretrained_model)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_running_test_pretrained_model_distrib_ddp_spawn(tmpdir):\n \"\"\"Verify `test()` on pretrained model.\"\"\"\n tutils.set_random_master_port()\n\n model = EvalModelTemplate()\n\n # exp file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # exp file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[checkpoint],\n logger=logger,\n gpus=[0, 1],\n accelerator='ddp_spawn',\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n log.info(os.listdir(tutils.get_data_path(logger, path_dir=tmpdir)))\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)\n\n # run test set\n new_trainer = Trainer(**trainer_options)\n results = new_trainer.test(pretrained_model)\n pretrained_model.cpu()\n\n acc = results[0]['test_acc']\n assert acc > 0.5, f\"Model failed to get expected {0.5} accuracy. test_acc = {acc}\"\n\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n tpipes.run_prediction(dataloader, pretrained_model)\n\n\ndef test_running_test_pretrained_model_cpu(tmpdir):\n \"\"\"Verify test() on pretrained model.\"\"\"\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # logger file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=3,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[checkpoint],\n logger=logger,\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n pretrained_model = EvalModelTemplate.load_from_checkpoint(trainer.checkpoint_callback.best_model_path)\n\n new_trainer = Trainer(**trainer_options)\n new_trainer.test(pretrained_model)\n\n # test we have good test accuracy\n tutils.assert_ok_model_acc(new_trainer)\n\n\[email protected]('model_template', [EvalModelTemplate, GenericEvalModelTemplate])\ndef test_load_model_from_checkpoint(tmpdir, model_template):\n \"\"\"Verify test() on pretrained model.\"\"\"\n hparams = model_template.get_default_hparams()\n model = model_template(**hparams)\n\n trainer_options = dict(\n progress_bar_refresh_rate=0,\n max_epochs=2,\n limit_train_batches=0.4,\n limit_val_batches=0.2,\n callbacks=[ModelCheckpoint(dirpath=tmpdir, monitor='early_stop_on', save_top_k=-1)],\n default_root_dir=tmpdir,\n )\n\n # fit model\n trainer = Trainer(**trainer_options)\n result = trainer.fit(model)\n trainer.test(ckpt_path=None)\n\n # correct result and ok accuracy\n assert result == 1, 'training failed to complete'\n\n # load last checkpoint\n last_checkpoint = sorted(glob.glob(os.path.join(trainer.checkpoint_callback.dirpath, \"*.ckpt\")))[-1]\n\n # Since `EvalModelTemplate` has 
`_save_hparams = True` by default, check that ckpt has hparams\n ckpt = torch.load(last_checkpoint)\n assert model_template.CHECKPOINT_HYPER_PARAMS_KEY in ckpt.keys(), 'hyper_parameters missing from checkpoints'\n\n # Ensure that model can be correctly restored from checkpoint\n pretrained_model = model_template.load_from_checkpoint(last_checkpoint)\n\n # test that hparams loaded correctly\n for k, v in hparams.items():\n assert getattr(pretrained_model, k) == v\n\n # assert weights are the same\n for (old_name, old_p), (new_name, new_p) in zip(model.named_parameters(), pretrained_model.named_parameters()):\n assert torch.all(torch.eq(old_p, new_p)), 'loaded weights are not the same as the saved weights'\n\n # Check `test` on pretrained model:\n new_trainer = Trainer(**trainer_options)\n new_trainer.test(pretrained_model)\n\n # test we have good test accuracy\n tutils.assert_ok_model_acc(new_trainer)\n\n\[email protected](torch.cuda.device_count() < 2, reason=\"test requires multi-GPU machine\")\ndef test_dp_resume(tmpdir):\n \"\"\"Make sure DP continues training correctly.\"\"\"\n hparams = EvalModelTemplate.get_default_hparams()\n model = EvalModelTemplate(**hparams)\n\n trainer_options = dict(max_epochs=1, gpus=2, accelerator='dp', default_root_dir=tmpdir)\n\n # get logger\n logger = tutils.get_default_logger(tmpdir)\n\n # exp file to get weights\n # logger file to get weights\n checkpoint = tutils.init_checkpoint_callback(logger)\n\n # add these to the trainer options\n trainer_options['logger'] = logger\n trainer_options['checkpoint_callback'] = checkpoint\n\n # fit model\n trainer = Trainer(**trainer_options)\n trainer.is_slurm_managing_tasks = True\n result = trainer.fit(model)\n\n # track epoch before saving. Increment since we finished the current epoch, don't want to rerun\n real_global_epoch = trainer.current_epoch + 1\n\n # correct result and ok accuracy\n assert result == 1, 'amp + dp model failed to complete'\n\n # ---------------------------\n # HPC LOAD/SAVE\n # ---------------------------\n # save\n trainer.checkpoint_connector.hpc_save(tmpdir, logger)\n\n # init new trainer\n new_logger = tutils.get_default_logger(tmpdir, version=logger.version)\n trainer_options['logger'] = new_logger\n trainer_options['checkpoint_callback'] = ModelCheckpoint(dirpath=tmpdir)\n trainer_options['limit_train_batches'] = 0.5\n trainer_options['limit_val_batches'] = 0.2\n trainer_options['max_epochs'] = 1\n new_trainer = Trainer(**trainer_options)\n\n # set the epoch start hook so we can predict before the model does the full training\n def assert_good_acc():\n assert new_trainer.current_epoch == real_global_epoch and new_trainer.current_epoch > 0\n\n # if model and state loaded correctly, predictions will be good even though we\n # haven't trained with the new loaded model\n dp_model = new_trainer.model\n dp_model.eval()\n\n dataloader = trainer.train_dataloader\n tpipes.run_prediction(dataloader, dp_model, dp=True)\n\n # new model\n model = EvalModelTemplate(**hparams)\n model.on_train_start = assert_good_acc\n\n # fit new model which should load hpc weights\n new_trainer.fit(model)\n\n # test freeze on gpu\n model.freeze()\n model.unfreeze()\n\n\ndef test_model_saving_loading(tmpdir):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n max_epochs=1,\n logger=logger,\n 
callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n default_root_dir=tmpdir,\n )\n result = trainer.fit(model)\n\n # traning complete\n assert result == 1, 'amp + ddp model failed to complete'\n\n # make a prediction\n dataloaders = model.test_dataloader()\n if not isinstance(dataloaders, list):\n dataloaders = [dataloaders]\n\n for dataloader in dataloaders:\n for batch in dataloader:\n break\n\n x, y = batch\n x = x.view(x.size(0), -1)\n\n # generate preds before saving model\n model.eval()\n pred_before_saving = model(x)\n\n # save model\n new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = tutils.get_data_path(logger, path_dir=tmpdir)\n hparams_path = os.path.join(hparams_path, 'hparams.yaml')\n model_2 = EvalModelTemplate.load_from_checkpoint(checkpoint_path=new_weights_path, hparams_file=hparams_path,)\n model_2.eval()\n\n # make prediction\n # assert that both predictions are the same\n new_pred = model_2(x)\n assert torch.all(torch.eq(pred_before_saving, new_pred)).item() == 1\n\n\[email protected]('url_ckpt', [True, False])\ndef test_strict_model_load_more_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv('TORCH_HOME', tmpdir)\n\n model = EvalModelTemplate()\n # Extra layer\n model.c_d3 = torch.nn.Linear(model.hidden_dim, model.hidden_dim)\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=1, logger=logger,\n callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n )\n result = trainer.fit(model)\n\n # traning complete\n assert result == 1\n\n # save model\n new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), 'hparams.yaml')\n hparams_url = f'http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}'\n ckpt_path = hparams_url if url_ckpt else new_weights_path\n\n EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False,\n )\n\n with pytest.raises(RuntimeError, match=r'Unexpected key\\(s\\) in state_dict: \"c_d3.weight\", \"c_d3.bias\"'):\n EvalModelTemplate.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True,\n )\n\n\[email protected]('url_ckpt', [True, False])\ndef test_strict_model_load_less_params(monkeypatch, tmpdir, tmpdir_server, url_ckpt):\n \"\"\"Tests use case where trainer saves the model, and user loads it from tags independently.\"\"\"\n # set $TORCH_HOME, which determines torch hub's cache path, to tmpdir\n monkeypatch.setenv('TORCH_HOME', tmpdir)\n\n model = EvalModelTemplate()\n\n # logger file to get meta\n logger = tutils.get_default_logger(tmpdir)\n\n # fit model\n trainer = Trainer(\n default_root_dir=tmpdir, max_epochs=1, logger=logger,\n callbacks=[ModelCheckpoint(dirpath=tmpdir)],\n )\n result = trainer.fit(model)\n\n # traning complete\n assert result == 1\n\n # save model\n new_weights_path = os.path.join(tmpdir, 'save_test.ckpt')\n trainer.save_checkpoint(new_weights_path)\n\n # load new model\n hparams_path = os.path.join(tutils.get_data_path(logger, path_dir=tmpdir), 'hparams.yaml')\n hparams_url = 
f'http://{tmpdir_server[0]}:{tmpdir_server[1]}/{os.path.basename(new_weights_path)}'\n ckpt_path = hparams_url if url_ckpt else new_weights_path\n\n class CurrentModel(EvalModelTemplate):\n def __init__(self):\n super().__init__()\n self.c_d3 = torch.nn.Linear(7, 7)\n\n CurrentModel.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=False,\n )\n\n with pytest.raises(RuntimeError, match=r'Missing key\\(s\\) in state_dict: \"c_d3.weight\", \"c_d3.bias\"'):\n CurrentModel.load_from_checkpoint(\n checkpoint_path=ckpt_path, hparams_file=hparams_path, strict=True,\n )\n\n\ndef test_model_pickle(tmpdir):\n model = EvalModelTemplate()\n pickle.dumps(model)\n cloudpickle.dumps(model)\n"
] | [
[
"torch.eq",
"torch.nn.Linear",
"torch.cuda.device_count",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
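The restore tests in the record above repeatedly assert, parameter by parameter, that a model rebuilt from a checkpoint matches the trained one via torch.eq/torch.all. A toy sketch of that check, assuming a plain nn.Linear in place of EvalModelTemplate and a hypothetical /tmp path:

import torch

# Save a model's weights, reload them into a fresh instance, and compare
# parameters the way test_load_model_from_checkpoint does.
model = torch.nn.Linear(4, 2)
ckpt_path = "/tmp/save_test.ckpt"  # hypothetical path
torch.save(model.state_dict(), ckpt_path)

restored = torch.nn.Linear(4, 2)  # fresh random weights, overwritten next
restored.load_state_dict(torch.load(ckpt_path))

for (old_name, old_p), (_, new_p) in zip(
        model.named_parameters(), restored.named_parameters()):
    assert torch.all(torch.eq(old_p, new_p)), f"{old_name} differs after restore"
print("loaded weights are the same as the saved weights")

The real tests layer Trainer.save_checkpoint and load_from_checkpoint on top of this, but the equality assertion at the end is the same mechanism.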
fanzhiyan/magenta | [
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6",
"622c47c19bb84c6f57b286ed03b738516b2f27d6"
] | [
"magenta/common/nade_test.py",
"magenta/models/arbitrary_image_stylization/arbitrary_image_stylization_losses.py",
"magenta/models/shared/events_rnn_model.py",
"magenta/models/onsets_frames_transcription/create_dataset_maestro.py",
"magenta/models/polyphony_rnn/polyphony_lib_test.py",
"magenta/models/coconet/lib_data.py",
"magenta/pipelines/pipeline_test.py",
"magenta/models/svg_vae/glyphazzn.py",
"magenta/models/performance_rnn/performance_rnn_create_dataset_test.py",
"magenta/models/music_vae/music_vae_generate.py"
] | [
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for nade.\"\"\"\n\nfrom magenta.common.nade import Nade\nimport tensorflow as tf\n\n\nclass NadeTest(tf.test.TestCase):\n\n def testInternalBias(self):\n batch_size = 4\n num_hidden = 6\n num_dims = 8\n test_inputs = tf.random_normal(shape=(batch_size, num_dims))\n nade = Nade(num_dims, num_hidden, internal_bias=True)\n log_prob, cond_probs = nade.log_prob(test_inputs)\n sample, sample_prob = nade.sample(n=batch_size)\n with self.test_session() as sess:\n sess.run([tf.global_variables_initializer()])\n self.assertEqual(log_prob.eval().shape, (batch_size,))\n self.assertEqual(cond_probs.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample_prob.eval().shape, (batch_size,))\n\n def testExternalBias(self):\n batch_size = 4\n num_hidden = 6\n num_dims = 8\n test_inputs = tf.random_normal(shape=(batch_size, num_dims))\n test_b_enc = tf.random_normal(shape=(batch_size, num_hidden))\n test_b_dec = tf.random_normal(shape=(batch_size, num_dims))\n\n nade = Nade(num_dims, num_hidden)\n log_prob, cond_probs = nade.log_prob(test_inputs, test_b_enc, test_b_dec)\n sample, sample_prob = nade.sample(b_enc=test_b_enc, b_dec=test_b_dec)\n with self.test_session() as sess:\n sess.run([tf.global_variables_initializer()])\n self.assertEqual(log_prob.eval().shape, (batch_size,))\n self.assertEqual(cond_probs.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample.eval().shape, (batch_size, num_dims))\n self.assertEqual(sample_prob.eval().shape, (batch_size,))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Loss methods for real-time arbitrary image stylization model.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom magenta.models.image_stylization import learning as learning_utils\nfrom magenta.models.image_stylization import vgg\nimport numpy as np\nimport tensorflow as tf\n\n\ndef total_loss(content_inputs, style_inputs, stylized_inputs, content_weights,\n style_weights, total_variation_weight, reuse=False):\n \"\"\"Computes the total loss function.\n\n The total loss function is composed of a content, a style and a total\n variation term.\n\n Args:\n content_inputs: Tensor. The input images.\n style_inputs: Tensor. The input images.\n stylized_inputs: Tensor. The stylized input images.\n content_weights: dict mapping layer names to their associated content loss\n weight. Keys that are missing from the dict won't have their content\n loss computed.\n style_weights: dict mapping layer names to their associated style loss\n weight. Keys that are missing from the dict won't have their style\n loss computed.\n total_variation_weight: float. Coefficient for the total variation part of\n the loss.\n reuse: bool. Whether to reuse model parameters. 
Defaults to False.\n\n Returns:\n Tensor for the total loss, dict mapping loss names to losses.\n \"\"\"\n # Propagate the input and its stylized version through VGG16.\n with tf.name_scope('content_endpoints'):\n content_end_points = vgg.vgg_16(content_inputs, reuse=reuse)\n with tf.name_scope('style_endpoints'):\n style_end_points = vgg.vgg_16(style_inputs, reuse=True)\n with tf.name_scope('stylized_endpoints'):\n stylized_end_points = vgg.vgg_16(stylized_inputs, reuse=True)\n\n # Compute the content loss\n with tf.name_scope('content_loss'):\n total_content_loss, content_loss_dict = content_loss(\n content_end_points, stylized_end_points, content_weights)\n\n # Compute the style loss\n with tf.name_scope('style_loss'):\n total_style_loss, style_loss_dict = style_loss(\n style_end_points, stylized_end_points, style_weights)\n\n # Compute the total variation loss\n with tf.name_scope('total_variation_loss'):\n tv_loss, total_variation_loss_dict = learning_utils.total_variation_loss(\n stylized_inputs, total_variation_weight)\n\n # Compute the total loss\n with tf.name_scope('total_loss'):\n loss = total_content_loss + total_style_loss + tv_loss\n\n loss_dict = {'total_loss': loss}\n loss_dict.update(content_loss_dict)\n loss_dict.update(style_loss_dict)\n loss_dict.update(total_variation_loss_dict)\n\n return loss, loss_dict\n\n\ndef content_loss(end_points, stylized_end_points, content_weights):\n \"\"\"Content loss.\n\n Args:\n end_points: dict mapping VGG16 layer names to their corresponding Tensor\n value for the original input.\n stylized_end_points: dict mapping VGG16 layer names to their corresponding\n Tensor value for the stylized input.\n content_weights: dict mapping layer names to their associated content loss\n weight. Keys that are missing from the dict won't have their content\n loss computed.\n\n Returns:\n Tensor for the total content loss, dict mapping loss names to losses.\n \"\"\"\n total_content_loss = np.float32(0.0)\n content_loss_dict = {}\n\n for name, weight in content_weights.items():\n loss = tf.reduce_mean(\n (end_points[name] - stylized_end_points[name]) ** 2)\n weighted_loss = weight * loss\n\n content_loss_dict['content_loss/' + name] = loss\n content_loss_dict['weighted_content_loss/' + name] = weighted_loss\n total_content_loss += weighted_loss\n\n content_loss_dict['total_content_loss'] = total_content_loss\n\n return total_content_loss, content_loss_dict\n\n\ndef style_loss(style_end_points, stylized_end_points, style_weights):\n \"\"\"Style loss.\n\n Args:\n style_end_points: dict mapping VGG16 layer names to their corresponding\n Tensor value for the style input.\n stylized_end_points: dict mapping VGG16 layer names to their corresponding\n Tensor value for the stylized input.\n style_weights: dict mapping layer names to their associated style loss\n weight. 
Keys that are missing from the dict won't have their style\n loss computed.\n\n Returns:\n Tensor for the total style loss, dict mapping loss names to losses.\n \"\"\"\n total_style_loss = np.float32(0.0)\n style_loss_dict = {}\n\n for name, weight in style_weights.items():\n loss = tf.reduce_mean(\n (learning_utils.gram_matrix(stylized_end_points[name]) -\n learning_utils.gram_matrix(style_end_points[name])) ** 2)\n weighted_loss = weight * loss\n\n style_loss_dict['style_loss/' + name] = loss\n style_loss_dict['weighted_style_loss/' + name] = weighted_loss\n total_style_loss += weighted_loss\n\n style_loss_dict['total_style_loss'] = total_style_loss\n\n return total_style_loss, style_loss_dict\n",
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Event sequence RNN model.\"\"\"\n\nimport collections\nimport copy\nimport functools\n\nfrom magenta.common import beam_search\nfrom magenta.common import state_util\nfrom magenta.models.shared import events_rnn_graph\nimport magenta.music as mm\nimport numpy as np\nfrom six.moves import range # pylint: disable=redefined-builtin\nimport tensorflow as tf\nfrom tensorflow.contrib import training as contrib_training\n\n# Model state when generating event sequences, consisting of the next inputs to\n# feed the model, the current RNN state, the current control sequence (if\n# applicable), and state for the current control sequence (if applicable).\nModelState = collections.namedtuple(\n 'ModelState', ['inputs', 'rnn_state', 'control_events', 'control_state'])\n\n\nclass EventSequenceRnnModelError(Exception):\n pass\n\n\ndef _extend_control_events_default(control_events, events, state):\n \"\"\"Default function for extending control event sequence.\n\n This function extends a control event sequence by duplicating the final event\n in the sequence. The control event sequence will be extended to have length\n one longer than the generated event sequence.\n\n Args:\n control_events: The control event sequence to extend.\n events: The list of generated events.\n state: State maintained while generating, unused.\n\n Returns:\n The resulting state after extending the control sequence (in this case the\n state will be returned unmodified).\n \"\"\"\n while len(control_events) <= len(events):\n control_events.append(control_events[-1])\n return state\n\n\nclass EventSequenceRnnModel(mm.BaseModel):\n \"\"\"Class for RNN event sequence generation models.\n\n Currently this class only supports generation, of both event sequences and\n note sequences (via event sequences). Support for model training will be added\n at a later time.\n \"\"\"\n\n def __init__(self, config):\n \"\"\"Initialize the EventSequenceRnnModel.\n\n Args:\n config: An EventSequenceRnnConfig containing the encoder/decoder and\n HParams to use.\n \"\"\"\n super(EventSequenceRnnModel, self).__init__()\n self._config = config\n\n def _build_graph_for_generation(self):\n events_rnn_graph.get_build_graph_fn('generate', self._config)()\n\n def _batch_size(self):\n \"\"\"Extracts the batch size from the graph.\"\"\"\n return self._session.graph.get_collection('inputs')[0].shape[0].value\n\n def _generate_step_for_batch(self, event_sequences, inputs, initial_state,\n temperature):\n \"\"\"Extends a batch of event sequences by a single step each.\n\n This method modifies the event sequences in place.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n to `self._batch_size()`. 
These are extended by this method.\n inputs: A Python list of model inputs, with length equal to\n `self._batch_size()`.\n initial_state: A numpy array containing the initial RNN state, where\n `initial_state.shape[0]` is equal to `self._batch_size()`.\n temperature: The softmax temperature.\n\n Returns:\n final_state: The final RNN state, a numpy array the same size as\n `initial_state`.\n loglik: The log-likelihood of the chosen softmax value for each event\n sequence, a 1-D numpy array of length\n `self._batch_size()`. If `inputs` is a full-length inputs batch, the\n log-likelihood of each entire sequence up to and including the\n generated step will be computed and returned.\n \"\"\"\n assert len(event_sequences) == self._batch_size()\n\n graph_inputs = self._session.graph.get_collection('inputs')[0]\n graph_initial_state = self._session.graph.get_collection('initial_state')\n graph_final_state = self._session.graph.get_collection('final_state')\n graph_softmax = self._session.graph.get_collection('softmax')[0]\n graph_temperature = self._session.graph.get_collection('temperature')\n\n feed_dict = {graph_inputs: inputs,\n tuple(graph_initial_state): initial_state}\n # For backwards compatibility, we only try to pass temperature if the\n # placeholder exists in the graph.\n if graph_temperature:\n feed_dict[graph_temperature[0]] = temperature\n final_state, softmax = self._session.run(\n [graph_final_state, graph_softmax], feed_dict)\n\n if isinstance(softmax, list):\n if softmax[0].shape[1] > 1:\n softmaxes = []\n for beam in range(softmax[0].shape[0]):\n beam_softmaxes = []\n for event in range(softmax[0].shape[1] - 1):\n beam_softmaxes.append(\n [softmax[s][beam, event] for s in range(len(softmax))])\n softmaxes.append(beam_softmaxes)\n loglik = self._config.encoder_decoder.evaluate_log_likelihood(\n event_sequences, softmaxes)\n else:\n loglik = np.zeros(len(event_sequences))\n else:\n if softmax.shape[1] > 1:\n # The inputs batch is longer than a single step, so we also want to\n # compute the log-likelihood of the event sequences up until the step\n # we're generating.\n loglik = self._config.encoder_decoder.evaluate_log_likelihood(\n event_sequences, softmax[:, :-1, :])\n else:\n loglik = np.zeros(len(event_sequences))\n\n indices = np.array(self._config.encoder_decoder.extend_event_sequences(\n event_sequences, softmax))\n if isinstance(softmax, list):\n p = 1.0\n for i in range(len(softmax)):\n p *= softmax[i][range(len(event_sequences)), -1, indices[:, i]]\n else:\n p = softmax[range(len(event_sequences)), -1, indices]\n\n return final_state, loglik + np.log(p)\n\n def _generate_step(self, event_sequences, model_states, logliks, temperature,\n extend_control_events_callback=None,\n modify_events_callback=None):\n \"\"\"Extends a list of event sequences by a single step each.\n\n This method modifies the event sequences in place. It also returns the\n modified event sequences and updated model states and log-likelihoods.\n\n Args:\n event_sequences: A list of event sequence objects, which are extended by\n this method.\n model_states: A list of model states, each of which contains model inputs\n and initial RNN states.\n logliks: A list containing the current log-likelihood for each event\n sequence.\n temperature: The softmax temperature.\n extend_control_events_callback: A function that takes three arguments: a\n current control event sequence, a current generated event sequence,\n and the control state. 
The function should a) extend the control event\n sequence to be one longer than the generated event sequence (or do\n nothing if it is already at least this long), and b) return the\n resulting control state.\n modify_events_callback: An optional callback for modifying the event list.\n Can be used to inject events rather than having them generated. If not\n None, will be called with 3 arguments after every event: the current\n EventSequenceEncoderDecoder, a list of current EventSequences, and a\n list of current encoded event inputs.\n\n Returns:\n event_sequences: A list of extended event sequences. These are modified in\n place but also returned.\n final_states: A list of resulting model states, containing model inputs\n for the next step along with RNN states for each event sequence.\n logliks: A list containing the updated log-likelihood for each event\n sequence.\n \"\"\"\n # Split the sequences to extend into batches matching the model batch size.\n batch_size = self._batch_size()\n num_seqs = len(event_sequences)\n num_batches = int(np.ceil(num_seqs / float(batch_size)))\n\n # Extract inputs and RNN states from the model states.\n inputs = [model_state.inputs for model_state in model_states]\n initial_states = [model_state.rnn_state for model_state in model_states]\n\n # Also extract control sequences and states.\n control_sequences = [\n model_state.control_events for model_state in model_states]\n control_states = [\n model_state.control_state for model_state in model_states]\n\n final_states = []\n logliks = np.array(logliks, dtype=np.float32)\n\n # Add padding to fill the final batch.\n pad_amt = -len(event_sequences) % batch_size\n padded_event_sequences = event_sequences + [\n copy.deepcopy(event_sequences[-1]) for _ in range(pad_amt)]\n padded_inputs = inputs + [inputs[-1]] * pad_amt\n padded_initial_states = initial_states + [initial_states[-1]] * pad_amt\n\n for b in range(num_batches):\n i, j = b * batch_size, (b + 1) * batch_size\n pad_amt = max(0, j - num_seqs)\n # Generate a single step for one batch of event sequences.\n batch_final_state, batch_loglik = self._generate_step_for_batch(\n padded_event_sequences[i:j],\n padded_inputs[i:j],\n state_util.batch(padded_initial_states[i:j], batch_size),\n temperature)\n final_states += state_util.unbatch(\n batch_final_state, batch_size)[:j - i - pad_amt]\n logliks[i:j - pad_amt] += batch_loglik[:j - i - pad_amt]\n\n # Construct inputs for next step.\n if extend_control_events_callback is not None:\n # We are conditioning on control sequences.\n for idx in range(len(control_sequences)):\n # Extend each control sequence to ensure that it is longer than the\n # corresponding event sequence.\n control_states[idx] = extend_control_events_callback(\n control_sequences[idx], event_sequences[idx], control_states[idx])\n next_inputs = self._config.encoder_decoder.get_inputs_batch(\n control_sequences, event_sequences)\n else:\n next_inputs = self._config.encoder_decoder.get_inputs_batch(\n event_sequences)\n\n if modify_events_callback:\n # Modify event sequences and inputs for next step.\n modify_events_callback(\n self._config.encoder_decoder, event_sequences, next_inputs)\n\n model_states = [ModelState(inputs=inputs, rnn_state=final_state,\n control_events=control_events,\n control_state=control_state)\n for inputs, final_state, control_events, control_state\n in zip(next_inputs, final_states,\n control_sequences, control_states)]\n\n return event_sequences, model_states, logliks\n\n def _generate_events(self, num_steps, 
primer_events, temperature=1.0,\n                       beam_size=1, branch_factor=1, steps_per_iteration=1,\n                       control_events=None, control_state=None,\n                       extend_control_events_callback=(\n                           _extend_control_events_default),\n                       modify_events_callback=None):\n    \"\"\"Generate an event sequence from a primer sequence.\n\n    Args:\n      num_steps: The integer length in steps of the final event sequence, after\n          generation. Includes the primer.\n      primer_events: The primer event sequence, a Python list-like object.\n      temperature: A float specifying how much to divide the logits by\n          before computing the softmax. Greater than 1.0 makes events more\n          random, less than 1.0 makes events less random.\n      beam_size: An integer, beam size to use when generating event sequences\n          via beam search.\n      branch_factor: An integer, beam search branch factor to use.\n      steps_per_iteration: An integer, number of steps to take per beam search\n          iteration.\n      control_events: A sequence of control events upon which to condition the\n          generation. If not None, the encoder/decoder should be a\n          ConditionalEventSequenceEncoderDecoder, and the control events will\n          be used along with the target sequence to generate model inputs. In\n          some cases, the control event sequence cannot be fully-determined as\n          later control events depend on earlier generated events; use the\n          `extend_control_events_callback` argument to provide a function that\n          extends the control event sequence.\n      control_state: Initial state used by `extend_control_events_callback`.\n      extend_control_events_callback: A function that takes three arguments: a\n          current control event sequence, a current generated event sequence,\n          and the control state. The function should a) extend the control\n          event sequence to be one longer than the generated event sequence\n          (or do nothing if it is already at least this long), and b) return\n          the resulting control state.\n      modify_events_callback: An optional callback for modifying the event\n          list. Can be used to inject events rather than having them generated.\n          If not None, will be called with 3 arguments after every event: the\n          current EventSequenceEncoderDecoder, a list of current\n          EventSequences, and a list of current encoded event inputs.\n\n    Returns:\n      The generated event sequence (which begins with the provided primer).\n\n    Raises:\n      EventSequenceRnnModelError: If the primer sequence has zero length or\n          is not shorter than num_steps.\n    \"\"\"\n    if (control_events is not None and\n        not isinstance(self._config.encoder_decoder,\n                       mm.ConditionalEventSequenceEncoderDecoder)):\n      raise EventSequenceRnnModelError(\n          'control sequence provided but encoder/decoder is not a '\n          'ConditionalEventSequenceEncoderDecoder')\n    if control_events is not None and extend_control_events_callback is None:\n      raise EventSequenceRnnModelError(\n          'must provide callback for extending control sequence (or use '\n          'default)')\n\n    if not primer_events:\n      raise EventSequenceRnnModelError(\n          'primer sequence must have non-zero length')\n    if len(primer_events) >= num_steps:\n      raise EventSequenceRnnModelError(\n          'primer sequence must be shorter than `num_steps`')\n\n    event_sequences = [copy.deepcopy(primer_events)]\n\n    # Construct inputs for first step after primer.\n    if control_events is not None:\n      # We are conditioning on a control sequence. 
Make sure it is longer than\n # the primer sequence.\n control_state = extend_control_events_callback(\n control_events, primer_events, control_state)\n inputs = self._config.encoder_decoder.get_inputs_batch(\n [control_events], event_sequences, full_length=True)\n else:\n inputs = self._config.encoder_decoder.get_inputs_batch(\n event_sequences, full_length=True)\n\n if modify_events_callback:\n # Modify event sequences and inputs for first step after primer.\n modify_events_callback(\n self._config.encoder_decoder, event_sequences, inputs)\n\n graph_initial_state = self._session.graph.get_collection('initial_state')\n initial_states = state_util.unbatch(self._session.run(graph_initial_state))\n\n # Beam search will maintain a state for each sequence consisting of the next\n # inputs to feed the model, and the current RNN state. We start out with the\n # initial full inputs batch and the zero state.\n initial_state = ModelState(\n inputs=inputs[0], rnn_state=initial_states[0],\n control_events=control_events, control_state=control_state)\n\n generate_step_fn = functools.partial(\n self._generate_step,\n temperature=temperature,\n extend_control_events_callback=\n extend_control_events_callback if control_events is not None else None,\n modify_events_callback=modify_events_callback)\n\n events, _, loglik = beam_search(\n initial_sequence=event_sequences[0],\n initial_state=initial_state,\n generate_step_fn=generate_step_fn,\n num_steps=num_steps - len(primer_events),\n beam_size=beam_size,\n branch_factor=branch_factor,\n steps_per_iteration=steps_per_iteration)\n\n tf.logging.info('Beam search yields sequence with log-likelihood: %f ',\n loglik)\n\n return events\n\n def _evaluate_batch_log_likelihood(self, event_sequences, inputs,\n initial_state):\n \"\"\"Evaluates the log likelihood of a batch of event sequences.\n\n Args:\n event_sequences: A list of event sequences, each of which is a Python\n list-like object. The list of event sequences should have length equal\n to `self._batch_size()`.\n inputs: A Python list of model inputs, with length equal to\n `self._batch_size()`.\n initial_state: A numpy array containing the initial RNN state, where\n `initial_state.shape[0]` is equal to `self._batch_size()`.\n\n Returns:\n A Python list containing the log likelihood of each sequence in\n `event_sequences`.\n \"\"\"\n graph_inputs = self._session.graph.get_collection('inputs')[0]\n graph_initial_state = self._session.graph.get_collection('initial_state')\n graph_softmax = self._session.graph.get_collection('softmax')[0]\n graph_temperature = self._session.graph.get_collection('temperature')\n\n feed_dict = {graph_inputs: inputs,\n tuple(graph_initial_state): initial_state}\n # For backwards compatibility, we only try to pass temperature if the\n # placeholder exists in the graph.\n if graph_temperature:\n feed_dict[graph_temperature[0]] = 1.0\n softmax = self._session.run(graph_softmax, feed_dict)\n\n return self._config.encoder_decoder.evaluate_log_likelihood(\n event_sequences, softmax)\n\n def _evaluate_log_likelihood(self, event_sequences, control_events=None):\n \"\"\"Evaluate log likelihood for a list of event sequences of the same length.\n\n Args:\n event_sequences: A list of event sequences for which to evaluate the log\n likelihood.\n control_events: A sequence of control events upon which to condition the\n event sequences. 
If not None, the encoder/decoder should be a\n          ConditionalEventSequenceEncoderDecoder, and the log likelihood of\n          each event sequence will be computed conditional on the control\n          sequence.\n\n    Returns:\n      The log likelihood of each sequence in `event_sequences`.\n\n    Raises:\n      EventSequenceRnnModelError: If the event sequences are not all the\n          same length, or if the control sequence is shorter than the event\n          sequences.\n    \"\"\"\n    num_steps = len(event_sequences[0])\n    for events in event_sequences[1:]:\n      if len(events) != num_steps:\n        raise EventSequenceRnnModelError(\n            'log likelihood evaluation requires all event sequences to have '\n            'the same length')\n    if control_events is not None and len(control_events) < num_steps:\n      raise EventSequenceRnnModelError(\n          'control sequence must be at least as long as the event sequences')\n\n    batch_size = self._batch_size()\n    num_full_batches = len(event_sequences) // batch_size\n\n    loglik = np.empty(len(event_sequences))\n\n    # Since we're computing log-likelihood and not generating, the inputs batch\n    # doesn't need to include the final event in each sequence.\n    if control_events is not None:\n      # We are conditioning on a control sequence.\n      inputs = self._config.encoder_decoder.get_inputs_batch(\n          [control_events] * len(event_sequences),\n          [events[:-1] for events in event_sequences],\n          full_length=True)\n    else:\n      inputs = self._config.encoder_decoder.get_inputs_batch(\n          [events[:-1] for events in event_sequences], full_length=True)\n\n    graph_initial_state = self._session.graph.get_collection('initial_state')\n    # Every sequence starts from the same zero state, which the graph returns\n    # already batched, so it can be fed directly for each batch below.\n    initial_state = self._session.run(graph_initial_state)\n    offset = 0\n    for _ in range(num_full_batches):\n      # Evaluate a single step for one batch of event sequences.\n      batch_indices = range(offset, offset + batch_size)\n      batch_loglik = self._evaluate_batch_log_likelihood(\n          [event_sequences[i] for i in batch_indices],\n          [inputs[i] for i in batch_indices],\n          initial_state)\n      loglik[batch_indices] = batch_loglik\n      offset += batch_size\n\n    if offset < len(event_sequences):\n      # There's an extra non-full batch. Pad it with copies of the final\n      # sequence and its inputs; the batched zero state is reused as-is.\n      num_extra = len(event_sequences) - offset\n      pad_size = batch_size - num_extra\n      batch_indices = range(offset, len(event_sequences))\n      batch_loglik = self._evaluate_batch_log_likelihood(\n          [event_sequences[i] for i in batch_indices] + [\n              copy.deepcopy(event_sequences[-1]) for _ in range(pad_size)],\n          [inputs[i] for i in batch_indices] + [inputs[-1]] * pad_size,\n          initial_state)\n      loglik[batch_indices] = batch_loglik[0:num_extra]\n\n    return loglik\n\n\nclass EventSequenceRnnConfig(object):\n  \"\"\"Stores a configuration for an event sequence RNN.\n\n  Only one of `steps_per_quarter` or `steps_per_second` will be applicable for\n  any particular model.\n\n  Attributes:\n    details: The GeneratorDetails message describing the config.\n    encoder_decoder: The EventSequenceEncoderDecoder or\n        ConditionalEventSequenceEncoderDecoder object to use.\n    hparams: The HParams containing hyperparameters to use. 
Will be merged with\n default hyperparameter values.\n steps_per_quarter: The integer number of quantized time steps per quarter\n note to use.\n steps_per_second: The integer number of quantized time steps per second to\n use.\n \"\"\"\n\n def __init__(self, details, encoder_decoder, hparams,\n steps_per_quarter=4, steps_per_second=100):\n hparams_dict = {\n 'batch_size': 64,\n 'rnn_layer_sizes': [128, 128],\n 'dropout_keep_prob': 1.0,\n 'attn_length': 0,\n 'clip_norm': 3,\n 'learning_rate': 0.001,\n 'residual_connections': False,\n 'use_cudnn': False\n }\n hparams_dict.update(hparams.values())\n\n self.details = details\n self.encoder_decoder = encoder_decoder\n self.hparams = contrib_training.HParams(**hparams_dict)\n self.steps_per_quarter = steps_per_quarter\n self.steps_per_second = steps_per_second\n",
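A minimal, self-contained sketch (not part of the Magenta sources above) of the padding arithmetic `_generate_step` uses to fill its final partial batch; the helper name `pad_to_batch_multiple` and the toy sequences are hypothetical.

import copy

def pad_to_batch_multiple(seqs, batch_size):
  # -len(seqs) % batch_size is the number of copies needed to reach the
  # next multiple of batch_size (0 if already aligned).
  pad_amt = -len(seqs) % batch_size
  return seqs + [copy.deepcopy(seqs[-1]) for _ in range(pad_amt)]

padded = pad_to_batch_multiple([[60], [62], [64], [65], [67]], batch_size=4)
assert len(padded) == 8  # 5 sequences pad out to two full batches of 4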
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Beam pipeline for MAESTRO dataset.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport hashlib\nimport os\n\nimport apache_beam as beam\nfrom apache_beam.metrics import Metrics\n\nfrom magenta.models.onsets_frames_transcription import audio_label_data_utils\nfrom magenta.models.onsets_frames_transcription import data\nfrom magenta.music import audio_io\nfrom magenta.protobuf import music_pb2\nimport numpy as np\n\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('output_directory', None, 'Path to output_directory')\ntf.app.flags.DEFINE_integer('min_length', 5, 'minimum length for a segment')\ntf.app.flags.DEFINE_integer('max_length', 20, 'maximum length for a segment')\ntf.app.flags.DEFINE_integer('sample_rate', 16000,\n 'sample_rate of the output files')\ntf.app.flags.DEFINE_boolean('preprocess_examples', False,\n 'Whether to preprocess examples.')\ntf.app.flags.DEFINE_integer(\n 'preprocess_train_example_multiplier', 1,\n 'How many times to run data preprocessing on each training example. '\n 'Useful if preprocessing involves a stochastic process that is useful to '\n 'sample multiple times.')\ntf.app.flags.DEFINE_string('config', 'onsets_frames',\n 'Name of the config to use.')\ntf.app.flags.DEFINE_string('dataset_config', 'maestro',\n 'Name of the dataset config to use.')\ntf.app.flags.DEFINE_string(\n 'hparams', '',\n 'A comma-separated list of `name=value` hyperparameter values.')\ntf.app.flags.DEFINE_string(\n 'pipeline_options', '--runner=DirectRunner',\n 'Command line flags to use in constructing the Beam pipeline options.')\ntf.app.flags.DEFINE_boolean(\n 'load_audio_with_librosa', False,\n 'Whether to use librosa for sampling audio')\n\n\ndef split_wav(input_example, min_length, max_length, sample_rate,\n debug_output_directory, split_example, load_audio_with_librosa):\n \"\"\"Splits wav and midi files for the dataset.\"\"\"\n tf.logging.info('Splitting %s',\n input_example.features.feature['id'].bytes_list.value[0])\n\n wav_data = input_example.features.feature['audio'].bytes_list.value[0]\n\n ns = music_pb2.NoteSequence.FromString(\n input_example.features.feature['sequence'].bytes_list.value[0])\n\n Metrics.counter('split_wav', 'read_midi_wav_to_split').inc()\n\n if not split_example:\n split_examples = audio_label_data_utils.process_record(\n wav_data,\n ns,\n ns.id,\n min_length=0,\n max_length=-1,\n sample_rate=sample_rate,\n load_audio_with_librosa=load_audio_with_librosa)\n\n for example in split_examples:\n Metrics.counter('split_wav', 'full_example').inc()\n yield example\n else:\n try:\n split_examples = audio_label_data_utils.process_record(\n wav_data,\n ns,\n ns.id,\n min_length=min_length,\n max_length=max_length,\n sample_rate=sample_rate,\n load_audio_with_librosa=load_audio_with_librosa)\n\n for example in split_examples:\n 
Metrics.counter('split_wav', 'split_example').inc()\n yield example\n except AssertionError:\n output_file = 'badexample-' + hashlib.md5(ns.id).hexdigest() + '.proto'\n output_path = os.path.join(debug_output_directory, output_file)\n tf.logging.error('Exception processing %s. Writing file to %s', ns.id,\n output_path)\n with tf.gfile.Open(output_path, 'w') as f:\n f.write(input_example.SerializeToString())\n raise\n\n\ndef multiply_example(ex, num_times):\n return [ex] * num_times\n\n\ndef preprocess_data(\n input_example, preprocess_example_fn, input_tensors_to_example_fn, hparams,\n process_for_training):\n \"\"\"Preprocess example using data.preprocess_data.\"\"\"\n with tf.Graph().as_default():\n example_proto = tf.constant(input_example.SerializeToString())\n\n input_tensors = preprocess_example_fn(\n example_proto=example_proto, hparams=hparams,\n is_training=process_for_training)\n\n with tf.Session() as sess:\n preprocessed = sess.run(input_tensors)\n\n example = input_tensors_to_example_fn(preprocessed, hparams)\n Metrics.counter('preprocess_data', 'preprocess_example').inc()\n return example\n\n\ndef generate_mixes(val, num_mixes, sourceid_to_exids):\n \"\"\"Generate lists of Example IDs to be mixed.\"\"\"\n del val\n rs = np.random.RandomState(seed=0) # Make the selection deterministic\n sourceid_to_exids_dict = collections.defaultdict(list)\n for sourceid, exid in sourceid_to_exids:\n sourceid_to_exids_dict[sourceid].append(exid)\n mixes = zip(\n *[rs.choice(k, num_mixes, replace=True).tolist()\n for k in sourceid_to_exids_dict.values()])\n keyed_mixes = dict(enumerate(mixes))\n exid_to_mixids = collections.defaultdict(list)\n for mixid, exids in keyed_mixes.items():\n for exid in exids:\n exid_to_mixids[exid].append(mixid)\n return exid_to_mixids\n\n\ndef mix_examples(mixid_exs, sample_rate, load_audio_with_librosa):\n \"\"\"Mix several Examples together to create a new example.\"\"\"\n mixid, exs = mixid_exs\n del mixid\n\n example_samples = []\n example_sequences = []\n\n for ex in exs:\n wav_data = ex.features.feature['audio'].bytes_list.value[0]\n if load_audio_with_librosa:\n samples = audio_io.wav_data_to_samples_librosa(wav_data, sample_rate)\n else:\n samples = audio_io.wav_data_to_samples(wav_data, sample_rate)\n example_samples.append(samples)\n ns = music_pb2.NoteSequence.FromString(\n ex.features.feature['sequence'].bytes_list.value[0])\n example_sequences.append(ns)\n\n mixed_samples, mixed_sequence = audio_label_data_utils.mix_sequences(\n individual_samples=example_samples, sample_rate=sample_rate,\n individual_sequences=example_sequences)\n\n mixed_wav_data = audio_io.samples_to_wav_data(mixed_samples, sample_rate)\n\n mixed_id = '::'.join(['mixed'] + [ns.id for ns in example_sequences])\n mixed_sequence.id = mixed_id\n mixed_filename = '::'.join(\n ['mixed'] + [ns.filename for ns in example_sequences])\n mixed_sequence.filename = mixed_filename\n\n examples = list(audio_label_data_utils.process_record(\n mixed_wav_data,\n mixed_sequence,\n mixed_id,\n min_length=0,\n max_length=-1,\n sample_rate=sample_rate))\n assert len(examples) == 1\n return examples[0]\n\n\ndef pipeline(config_map, dataset_config_map, preprocess_example_fn,\n input_tensors_to_example_fn):\n \"\"\"Pipeline for dataset creation.\"\"\"\n tf.flags.mark_flags_as_required(['output_directory'])\n\n pipeline_options = beam.options.pipeline_options.PipelineOptions(\n FLAGS.pipeline_options.split(','))\n\n config = config_map[FLAGS.config]\n hparams = config.hparams\n 
hparams.parse(FLAGS.hparams)\n\n  datasets = dataset_config_map[FLAGS.dataset_config]\n\n  if tf.gfile.Exists(FLAGS.output_directory):\n    raise ValueError(\n        'Output directory %s already exists!' % FLAGS.output_directory)\n  tf.gfile.MakeDirs(FLAGS.output_directory)\n  with tf.gfile.Open(\n      os.path.join(FLAGS.output_directory, 'config.txt'), 'w') as f:\n    f.write('\\n\\n'.join([\n        'min_length: {}'.format(FLAGS.min_length),\n        'max_length: {}'.format(FLAGS.max_length),\n        'sample_rate: {}'.format(FLAGS.sample_rate),\n        'preprocess_examples: {}'.format(FLAGS.preprocess_examples),\n        'preprocess_train_example_multiplier: {}'.format(\n            FLAGS.preprocess_train_example_multiplier),\n        'config: {}'.format(FLAGS.config),\n        'hparams: {}'.format(hparams.to_json(sort_keys=True)),\n        'dataset_config: {}'.format(FLAGS.dataset_config),\n        'datasets: {}'.format(datasets),\n    ]))\n\n  with beam.Pipeline(options=pipeline_options) as p:\n    for dataset in datasets:\n      if isinstance(dataset.path, (list, tuple)):\n        # If dataset.path is a list, then it's a list of sources to mix\n        # together to form new examples. First, do the mixing, then pass the\n        # results to the rest of the pipeline.\n        id_exs = []\n        sourceid_to_exids = []\n        for source_id, stem_path in enumerate(dataset.path):\n          if dataset.num_mixes is None:\n            raise ValueError(\n                'If path is a list, num_mixes must not be None: {}'.format(\n                    dataset))\n          stem_p = p | 'tfrecord_list_%s_%d' % (dataset.name, source_id) >> (\n              beam.Create(data.generate_sharded_filenames(stem_path)))\n          stem_p |= 'read_tfrecord_%s_%d' % (dataset.name, source_id) >> (\n              beam.io.tfrecordio.ReadAllFromTFRecord(\n                  coder=beam.coders.ProtoCoder(tf.train.Example)))\n          # Key all examples with a hash.\n          def key_example(ex, source_id):\n            # Prefixing the hash with the source_id is critical because the\n            # same dataset may be present multiple times and we want unique\n            # ids for each entry.\n            return (\n                '{}-{}'.format(\n                    source_id,\n                    hashlib.sha256(ex.SerializeToString()).hexdigest()),\n                ex)\n          stem_p |= 'add_id_key_%s_%d' % (dataset.name, source_id) >> (\n              beam.Map(key_example, source_id=source_id))\n          id_exs.append(stem_p)\n\n          # Create a list of source_id to example id.\n          def sourceid_to_exid(id_ex, source_id):\n            return (source_id, id_ex[0])\n          sourceid_to_exids.append(\n              stem_p | 'key_%s_%d' % (dataset.name, source_id) >> (\n                  beam.Map(sourceid_to_exid, source_id=source_id)))\n        id_exs = id_exs | 'id_exs_flatten_%s' % dataset.name >> beam.Flatten()\n        sourceid_to_exids = (\n            sourceid_to_exids | 'sourceid_to_exids_flatten_%s' % dataset.name >>\n            beam.Flatten())\n        # Pass the list of source id to example IDs to generate_mixes,\n        # which will create mixes by selecting random IDs from each source\n        # (with replacement). This is represented as a list of example IDs\n        # to Mix IDs.\n        # Note: beam.Create([0]) is just a single dummy value to allow the\n        # sourceid_to_exids to be passed in as a python list so we can do the\n        # sampling with numpy.\n        exid_to_mixids = (\n            p\n            | 'create_dummy_%s' % dataset.name >> beam.Create([0])\n            | 'generate_mixes_%s' % dataset.name >> beam.Map(\n                generate_mixes, num_mixes=dataset.num_mixes,\n                sourceid_to_exids=beam.pvalue.AsList(sourceid_to_exids)))\n        # Create a list of (Mix ID, Full Example proto). Note: Examples may be\n        # present in more than one mix. 
Then, group by Mix ID.\n def mixid_to_exs(id_ex, exid_to_mixids):\n exid, ex = id_ex\n for mixid in exid_to_mixids[exid]:\n yield mixid, ex\n mixid_exs = (\n id_exs\n | 'mixid_to_exs_%s' % dataset.name >> beam.FlatMap(\n mixid_to_exs,\n exid_to_mixids=beam.pvalue.AsSingleton(exid_to_mixids))\n | 'group_by_key_%s' % dataset.name >> beam.GroupByKey())\n # Take these groups of Examples, mix their audio and sequences to return\n # a single new Example. Then, carry on with the rest of the pipeline\n # like normal.\n split_p = (\n mixid_exs\n | 'mix_examples_%s' % dataset.name >> beam.Map(\n mix_examples, FLAGS.sample_rate, FLAGS.load_audio_with_librosa))\n else:\n if dataset.num_mixes is not None:\n raise ValueError(\n 'If path is not a list, num_mixes must be None: {}'.format(\n dataset))\n split_p = p | 'tfrecord_list_%s' % dataset.name >> beam.Create(\n data.generate_sharded_filenames(dataset.path))\n split_p |= 'read_tfrecord_%s' % dataset.name >> (\n beam.io.tfrecordio.ReadAllFromTFRecord(\n coder=beam.coders.ProtoCoder(tf.train.Example)))\n split_p |= 'shuffle_input_%s' % dataset.name >> beam.Reshuffle()\n split_p |= 'split_wav_%s' % dataset.name >> beam.FlatMap(\n split_wav, min_length=FLAGS.min_length, max_length=FLAGS.max_length,\n sample_rate=FLAGS.sample_rate,\n debug_output_directory=FLAGS.output_directory,\n split_example=dataset.process_for_training,\n load_audio_with_librosa=FLAGS.load_audio_with_librosa)\n if FLAGS.preprocess_examples:\n if dataset.process_for_training:\n mul_name = 'preprocess_multiply_%dx_%s' % (\n FLAGS.preprocess_train_example_multiplier, dataset.name)\n split_p |= mul_name >> beam.FlatMap(\n multiply_example, FLAGS.preprocess_train_example_multiplier)\n split_p |= 'preprocess_%s' % dataset.name >> beam.Map(\n preprocess_data, preprocess_example_fn, input_tensors_to_example_fn,\n hparams, dataset.process_for_training)\n split_p |= 'shuffle_output_%s' % dataset.name >> beam.Reshuffle()\n split_p |= 'write_%s' % dataset.name >> beam.io.WriteToTFRecord(\n os.path.join(FLAGS.output_directory, '%s.tfrecord' % dataset.name),\n coder=beam.coders.ProtoCoder(tf.train.Example))\n",
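A hedged, standalone illustration (not part of the pipeline above) of the mapping `generate_mixes` builds: each mix draws one example ID per source with replacement, and the result is inverted into an example ID -> mix IDs table. The source and example IDs below are made up.

import collections
import numpy as np

sourceid_to_exids = [(0, 'a'), (0, 'b'), (1, 'x'), (1, 'y')]
num_mixes = 3
rs = np.random.RandomState(seed=0)  # deterministic, as in generate_mixes
by_source = collections.defaultdict(list)
for sourceid, exid in sourceid_to_exids:
  by_source[sourceid].append(exid)
# One example drawn per source for each of num_mixes mixes.
mixes = zip(*[rs.choice(k, num_mixes, replace=True).tolist()
              for k in by_source.values()])
exid_to_mixids = collections.defaultdict(list)
for mixid, exids in enumerate(mixes):
  for exid in exids:
    exid_to_mixids[exid].append(mixid)
print(dict(exid_to_mixids))  # e.g. {'b': [0, 1], 'x': [0, 2], ...}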
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for polyphony_lib.\"\"\"\n\nimport copy\n\nfrom magenta.common import testing_lib as common_testing_lib\nfrom magenta.models.polyphony_rnn import polyphony_lib\nfrom magenta.music import sequences_lib\nfrom magenta.music import testing_lib\nfrom magenta.protobuf import music_pb2\nimport tensorflow as tf\n\n\nclass PolyphonyLibTest(tf.test.TestCase):\n\n def setUp(self):\n self.maxDiff = None # pylint:disable=invalid-name\n\n self.note_sequence = common_testing_lib.parse_test_proto(\n music_pb2.NoteSequence,\n \"\"\"\n tempos: {\n qpm: 60\n }\n ticks_per_quarter: 220\n \"\"\")\n\n def testFromQuantizedNoteSequence(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n poly_seq = list(polyphony_lib.PolyphonicSequence(quantized_sequence))\n\n pe = polyphony_lib.PolyphonicEvent\n expected_poly_seq = [\n pe(pe.START, None),\n # step 0\n pe(pe.NEW_NOTE, 64),\n pe(pe.NEW_NOTE, 60),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.NEW_NOTE, 67),\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.STEP_END, None),\n # step 2\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.STEP_END, None),\n # step 3\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(expected_poly_seq, poly_seq)\n\n def testToSequence(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n poly_seq = polyphony_lib.PolyphonicSequence(quantized_sequence)\n poly_seq_ns = poly_seq.to_sequence(qpm=60.0)\n\n # Make comparison easier\n poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))\n self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))\n\n self.assertEqual(self.note_sequence, poly_seq_ns)\n\n def testToSequenceWithContinuedNotesNotStarted(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.CONTINUED_NOTE, 67), # Was not started, should be ignored.\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n poly_seq_ns = poly_seq.to_sequence(qpm=60.0)\n\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 2.0), (64, 100, 0.0, 2.0)])\n\n # Make comparison easier\n poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))\n self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))\n\n self.assertEqual(self.note_sequence, poly_seq_ns)\n\n def 
testToSequenceWithExtraEndEvents(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.END, None), # END event before end. Should be ignored.\n pe(pe.NEW_NOTE, 64),\n pe(pe.END, None), # END event before end. Should be ignored.\n pe(pe.STEP_END, None),\n pe(pe.END, None), # END event before end. Should be ignored.\n # step 1\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.END, None), # END event before end. Should be ignored.\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.END, None), # END event before end. Should be ignored.\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n poly_seq_ns = poly_seq.to_sequence(qpm=60.0)\n\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 2.0), (64, 100, 0.0, 2.0)])\n\n # Make comparison easier\n poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))\n self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))\n\n self.assertEqual(self.note_sequence, poly_seq_ns)\n\n def testToSequenceWithUnfinishedSequence(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.NEW_NOTE, 64),\n # missing STEP_END and END events at end of sequence.\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n with self.assertRaises(ValueError):\n poly_seq.to_sequence(qpm=60.0)\n\n def testToSequenceWithRepeatedNotes(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.NEW_NOTE, 60),\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n poly_seq_ns = poly_seq.to_sequence(qpm=60.0)\n\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 1.0), (64, 100, 0.0, 2.0), (60, 100, 1.0, 2.0)])\n\n # Make comparison easier\n poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))\n self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))\n\n self.assertEqual(self.note_sequence, poly_seq_ns)\n\n def testToSequenceWithBaseNoteSequence(self):\n poly_seq = polyphony_lib.PolyphonicSequence(\n steps_per_quarter=1, start_step=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n base_seq = copy.deepcopy(self.note_sequence)\n testing_lib.add_track_to_sequence(\n base_seq, 0, [(60, 100, 0.0, 1.0)])\n\n poly_seq_ns = poly_seq.to_sequence(qpm=60.0, base_note_sequence=base_seq)\n\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 1.0), (60, 100, 1.0, 3.0), (64, 100, 1.0, 3.0)])\n\n # Make comparison easier\n poly_seq_ns.notes.sort(key=lambda n: (n.start_time, n.pitch))\n self.note_sequence.notes.sort(key=lambda n: (n.start_time, n.pitch))\n\n self.assertEqual(self.note_sequence, poly_seq_ns)\n\n def testToSequenceWithEmptySteps(self):\n poly_seq = polyphony_lib.PolyphonicSequence(\n steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.STEP_END, None),\n 
# step 1\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n poly_seq_ns = poly_seq.to_sequence(qpm=60.0)\n\n self.note_sequence.total_time = 2\n\n self.assertEqual(self.note_sequence, poly_seq_ns)\n\n def testSetLengthAddSteps(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n poly_seq.set_length(5)\n\n self.assertEqual(5, poly_seq.num_steps)\n self.assertListEqual([0, 0, 1, 2, 3, 4, 5], poly_seq.steps)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n pe(pe.START, None),\n\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n # Add 5 more steps to make sure END is managed properly.\n poly_seq.set_length(10)\n\n self.assertEqual(10, poly_seq.num_steps)\n self.assertListEqual([0, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10], poly_seq.steps)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n pe(pe.START, None),\n\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n def testSetLengthAddStepsToSequenceWithoutEnd(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n # Construct a list with one silence step and no END.\n pe = polyphony_lib.PolyphonicEvent\n poly_seq.append(pe(pe.STEP_END, None))\n\n poly_seq.set_length(2)\n poly_events = [\n pe(pe.START, None),\n\n pe(pe.STEP_END, None),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n def testSetLengthAddStepsToSequenceWithUnfinishedStep(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n # Construct a list with one note and no STEP_END or END.\n pe = polyphony_lib.PolyphonicEvent\n poly_seq.append(pe(pe.NEW_NOTE, 60))\n\n poly_seq.set_length(2)\n poly_events = [\n pe(pe.START, None),\n\n pe(pe.NEW_NOTE, 60),\n pe(pe.STEP_END, None),\n\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n def testSetLengthRemoveSteps(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n # step 2\n pe(pe.NEW_NOTE, 67),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n poly_seq.set_length(2)\n poly_events = [\n pe(pe.START, None),\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n poly_seq.set_length(1)\n poly_events = [\n pe(pe.START, None),\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n poly_seq.set_length(0)\n poly_events = [\n pe(pe.START, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n def testSetLengthRemoveStepsFromSequenceWithoutEnd(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n # Construct a list with two silence steps and no END.\n pe = 
polyphony_lib.PolyphonicEvent\n poly_seq.append(pe(pe.STEP_END, None))\n poly_seq.append(pe(pe.STEP_END, None))\n\n poly_seq.set_length(1)\n poly_events = [\n pe(pe.START, None),\n\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n def testSetLengthRemoveStepsFromSequenceWithUnfinishedStep(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n # Construct a list with a silence step, a new note, and no STEP_END or END.\n pe = polyphony_lib.PolyphonicEvent\n poly_seq.append(pe(pe.STEP_END, None))\n poly_seq.append(pe(pe.NEW_NOTE, 60))\n\n poly_seq.set_length(1)\n poly_events = [\n pe(pe.START, None),\n\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n self.assertEqual(poly_events, list(poly_seq))\n\n def testNumSteps(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n self.assertEqual(2, poly_seq.num_steps)\n self.assertListEqual([0, 0, 0, 0, 1, 1, 1, 2], poly_seq.steps)\n\n def testNumStepsIncompleteStep(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.STEP_END, None),\n # incomplete step. should not be counted.\n pe(pe.NEW_NOTE, 72),\n\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n self.assertEqual(2, poly_seq.num_steps)\n self.assertListEqual([0, 0, 0, 0, 1, 1, 1, 2], poly_seq.steps)\n\n def testSteps(self):\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.NEW_NOTE, 64),\n pe(pe.STEP_END, None),\n # step 1\n pe(pe.CONTINUED_NOTE, 60),\n pe(pe.CONTINUED_NOTE, 64),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n ]\n\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n for event in poly_events:\n poly_seq.append(event)\n self.assertListEqual([0, 0, 0, 0, 1, 1, 1, 2], poly_seq.steps)\n\n poly_seq = polyphony_lib.PolyphonicSequence(\n steps_per_quarter=1, start_step=2)\n for event in poly_events:\n poly_seq.append(event)\n self.assertListEqual([2, 2, 2, 2, 3, 3, 3, 4], poly_seq.steps)\n\n def testTrimTrailingEndEvents(self):\n poly_seq = polyphony_lib.PolyphonicSequence(steps_per_quarter=1)\n\n pe = polyphony_lib.PolyphonicEvent\n poly_events = [\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.STEP_END, None),\n\n pe(pe.END, None),\n pe(pe.END, None),\n ]\n for event in poly_events:\n poly_seq.append(event)\n\n poly_seq.trim_trailing_end_events()\n\n poly_events_expected = [\n pe(pe.START, None),\n # step 0\n pe(pe.NEW_NOTE, 60),\n pe(pe.STEP_END, None),\n ]\n\n self.assertEqual(poly_events_expected, list(poly_seq))\n\n def testExtractPolyphonicSequences(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0, [(60, 100, 0.0, 4.0)])\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n\n seqs, _ = polyphony_lib.extract_polyphonic_sequences(quantized_sequence)\n self.assertEqual(1, len(seqs))\n\n seqs, _ = polyphony_lib.extract_polyphonic_sequences(\n quantized_sequence, min_steps_discard=2, max_steps_discard=5)\n 
self.assertEqual(1, len(seqs))\n\n self.note_sequence.notes[0].end_time = 1.0\n self.note_sequence.total_time = 1.0\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n seqs, _ = polyphony_lib.extract_polyphonic_sequences(\n quantized_sequence, min_steps_discard=3, max_steps_discard=5)\n self.assertEqual(0, len(seqs))\n\n self.note_sequence.notes[0].end_time = 10.0\n self.note_sequence.total_time = 10.0\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n seqs, _ = polyphony_lib.extract_polyphonic_sequences(\n quantized_sequence, min_steps_discard=3, max_steps_discard=5)\n self.assertEqual(0, len(seqs))\n\n def testExtractPolyphonicMultiProgram(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0,\n [(60, 100, 0.0, 4.0), (64, 100, 0.0, 3.0), (67, 100, 1.0, 2.0)])\n self.note_sequence.notes[0].program = 2\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n\n seqs, _ = polyphony_lib.extract_polyphonic_sequences(quantized_sequence)\n self.assertEqual(0, len(seqs))\n\n def testExtractNonZeroStart(self):\n testing_lib.add_track_to_sequence(\n self.note_sequence, 0, [(60, 100, 0.0, 4.0)])\n quantized_sequence = sequences_lib.quantize_note_sequence(\n self.note_sequence, steps_per_quarter=1)\n\n seqs, _ = polyphony_lib.extract_polyphonic_sequences(\n quantized_sequence, start_step=4, min_steps_discard=1)\n self.assertEqual(0, len(seqs))\n seqs, _ = polyphony_lib.extract_polyphonic_sequences(\n quantized_sequence, start_step=0, min_steps_discard=1)\n self.assertEqual(1, len(seqs))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
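A small standalone sketch (not from polyphony_lib) of the step-counting convention the tests above rely on: a PolyphonicSequence advances one step per STEP_END event, so `num_steps` is the number of STEP_END events seen. The tuple representation of events here is hypothetical.

STEP_END = 'STEP_END'

def count_steps(events):
  # Each (event_type, pitch) tuple stands in for a PolyphonicEvent.
  return sum(1 for event_type, _ in events if event_type == STEP_END)

events = [('NEW_NOTE', 60), (STEP_END, None),
          ('CONTINUED_NOTE', 60), (STEP_END, None)]
assert count_steps(events) == 2  # two steps, as in testNumSteps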
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Classes for datasets and batches.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom magenta.models.coconet import lib_mask\nfrom magenta.models.coconet import lib_pianoroll\nfrom magenta.models.coconet import lib_util\nimport numpy as np\nimport tensorflow as tf\n\n\nclass Dataset(lib_util.Factory):\n \"\"\"Class for retrieving different datasets.\"\"\"\n\n def __init__(self, basepath, hparams, fold):\n \"\"\"Initialize a `Dataset` instance.\n\n Args:\n basepath: path to directory containing dataset npz files.\n hparams: Hyperparameters object.\n fold: data subset, one of {train,valid,test}.\n\n Raises:\n ValueError: if requested a temporal resolution shorter then that available\n in the dataset.\n \"\"\"\n self.basepath = basepath\n self.hparams = hparams\n self.fold = fold\n\n if self.shortest_duration != self.hparams.quantization_level:\n raise ValueError(\"The data has a temporal resolution of shortest \"\n \"duration=%r, requested=%r\" %\n (self.shortest_duration,\n self.hparams.quantization_level))\n\n # Update the default pitch ranges in hparams to reflect that of dataset.\n hparams.pitch_ranges = [self.min_pitch, self.max_pitch]\n hparams.shortest_duration = self.shortest_duration\n self.encoder = lib_pianoroll.get_pianoroll_encoder_decoder(hparams)\n data_path = os.path.join(tf.resource_loader.get_data_files_path(),\n self.basepath, \"%s.npz\" % self.name)\n print(\"Loading data from\", data_path)\n with tf.gfile.Open(data_path, \"r\") as p:\n self.data = np.load(p)[fold]\n\n @property\n def name(self):\n return self.hparams.dataset\n\n @property\n def num_examples(self):\n return len(self.data)\n\n @property\n def num_pitches(self):\n return self.max_pitch + 1 - self.min_pitch\n\n def get_sequences(self):\n \"\"\"Return the raw collection of examples.\"\"\"\n return self.data\n\n def get_pianorolls(self, sequences=None):\n \"\"\"Turn sequences into pianorolls.\n\n Args:\n sequences: the collection of sequences to convert. If not given, the\n entire dataset is converted.\n\n Returns:\n A list of multi-instrument pianorolls, each shaped\n (duration, pitches, instruments)\n \"\"\"\n if sequences is None:\n sequences = self.get_sequences()\n return list(map(self.encoder.encode, sequences))\n\n def get_featuremaps(self, sequences=None):\n \"\"\"Turn sequences into features for training/evaluation.\n\n Encodes sequences into randomly cropped and masked pianorolls, and returns\n a padded Batch containing three channels: the pianorolls, the corresponding\n masks and their lengths before padding (but after cropping).\n\n Args:\n sequences: the collection of sequences to convert. 
If not given, the\n          entire dataset is converted.\n\n    Returns:\n      A Batch containing pianorolls, masks and piece lengths.\n    \"\"\"\n    if sequences is None:\n      sequences = self.get_sequences()\n\n    pianorolls = []\n    masks = []\n\n    for sequence in sequences:\n      pianoroll = self.encoder.encode(sequence)\n      pianoroll = lib_util.random_crop(pianoroll, self.hparams.crop_piece_len)\n      mask = lib_mask.get_mask(\n          self.hparams.maskout_method,\n          pianoroll.shape,\n          separate_instruments=self.hparams.separate_instruments,\n          blankout_ratio=self.hparams.corrupt_ratio)\n      pianorolls.append(pianoroll)\n      masks.append(mask)\n\n    (pianorolls, masks), lengths = lib_util.pad_and_stack(pianorolls, masks)\n    assert pianorolls.ndim == 4 and masks.ndim == 4\n    assert pianorolls.shape == masks.shape\n    return Batch(pianorolls=pianorolls, masks=masks, lengths=lengths)\n\n  def update_hparams(self, hparams):\n    \"\"\"Update subset of Hyperparameters pertaining to data.\"\"\"\n    for key in \"num_instruments min_pitch max_pitch qpm\".split():\n      setattr(hparams, key, getattr(self, key))\n\n\ndef get_dataset(basepath, hparams, fold):\n  \"\"\"Factory for Datasets.\"\"\"\n  return Dataset.make(hparams.dataset, basepath, hparams, fold)\n\n\nclass Jsb16thSeparated(Dataset):\n  key = \"Jsb16thSeparated\"\n  min_pitch = 36\n  max_pitch = 81\n  shortest_duration = 0.125\n  num_instruments = 4\n  qpm = 60\n\n\nclass TestData(Dataset):\n  key = \"TestData\"\n  min_pitch = 0\n  max_pitch = 127\n  shortest_duration = 0.125\n  num_instruments = 4\n  qpm = 60\n\n\nclass Batch(object):\n  \"\"\"A Batch of training/evaluation data.\"\"\"\n\n  keys = set(\"pianorolls masks lengths\".split())\n\n  def __init__(self, **kwargs):\n    \"\"\"Initialize a Batch instance.\n\n    Args:\n      **kwargs: data dictionary. Must have three keys \"pianorolls\", \"masks\",\n          \"lengths\", each corresponding to a model placeholder. Each value\n          is a sequence (i.e. a batch) of examples.\n    \"\"\"\n    assert set(kwargs.keys()) == self.keys\n    assert all(\n        len(value) == len(list(kwargs.values())[0])\n        for value in kwargs.values())\n    self.features = kwargs\n\n  def get_feed_dict(self, placeholders):\n    \"\"\"Zip placeholders and batch data into a feed dict.\n\n    Args:\n      placeholders: placeholder dictionary. Must have three keys \"pianorolls\",\n          \"masks\" and \"lengths\".\n\n    Returns:\n      A feed dict mapping the given placeholders to the data in this batch.\n    \"\"\"\n    assert set(placeholders.keys()) == self.keys\n    return dict((placeholders[key], self.features[key]) for key in self.keys)\n\n  def batches(self, **batches_kwargs):\n    \"\"\"Iterate over sub-batches of this batch.\n\n    Args:\n      **batches_kwargs: kwargs passed on to lib_util.batches.\n\n    Yields:\n      An iterator over sub-Batches.\n    \"\"\"\n    keys, values = list(zip(*list(self.features.items())))\n    for batch in lib_util.batches(*values, **batches_kwargs):\n      yield Batch(**dict(lib_util.eqzip(keys, batch)))\n",
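A rough sketch of what `lib_util.pad_and_stack` must provide for `get_featuremaps` above: zero-pad variable-length pianorolls along time to a common duration and keep the pre-padding lengths. This is an assumption about that helper's behavior, not its actual implementation.

import numpy as np

def pad_and_stack_one(rolls):
  # rolls: list of (time, pitch, instrument) arrays with varying time lengths.
  lengths = np.array([len(r) for r in rolls])
  max_len = lengths.max()
  padded = np.stack(
      [np.pad(r, [(0, max_len - len(r))] + [(0, 0)] * (r.ndim - 1),
              'constant') for r in rolls])
  return padded, lengths

rolls = [np.ones((8, 46, 4)), np.ones((5, 46, 4))]
stacked, lengths = pad_and_stack_one(rolls)
assert stacked.shape == (2, 8, 46, 4) and lengths.tolist() == [8, 5]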
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for pipeline.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tempfile\n\nfrom magenta.common import testing_lib\nfrom magenta.pipelines import pipeline\nfrom magenta.pipelines import statistics\nimport tensorflow as tf\n\nMockStringProto = testing_lib.MockStringProto # pylint: disable=invalid-name\n\n\nclass MockPipeline(pipeline.Pipeline):\n\n def __init__(self):\n super(MockPipeline, self).__init__(\n input_type=str,\n output_type={'dataset_1': MockStringProto,\n 'dataset_2': MockStringProto})\n\n def transform(self, input_object):\n return {\n 'dataset_1': [\n MockStringProto(input_object + '_A'),\n MockStringProto(input_object + '_B')],\n 'dataset_2': [MockStringProto(input_object + '_C')]}\n\n\nclass PipelineTest(tf.test.TestCase):\n\n def testFileIteratorRecursive(self):\n target_files = [\n ('0.ext', b'hello world'),\n ('a/1.ext', b'123456'),\n ('a/2.ext', b'abcd'),\n ('b/c/3.ext', b'9999'),\n ('b/z/3.ext', b'qwerty'),\n ('d/4.ext', b'mary had a little lamb'),\n ('d/e/5.ext', b'zzzzzzzz'),\n ('d/e/f/g/6.ext', b'yyyyyyyyyyy')]\n extra_files = [\n ('stuff.txt', b'some stuff'),\n ('a/q/r/file', b'more stuff')]\n\n root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n for path, contents in target_files + extra_files:\n abs_path = os.path.join(root_dir, path)\n tf.gfile.MakeDirs(os.path.dirname(abs_path))\n tf.gfile.GFile(abs_path, mode='w').write(contents)\n\n file_iterator = pipeline.file_iterator(root_dir, 'ext', recurse=True)\n\n self.assertEqual(set(contents for _, contents in target_files),\n set(file_iterator))\n\n def testFileIteratorNotRecursive(self):\n target_files = [\n ('0.ext', b'hello world'),\n ('1.ext', b'hi')]\n extra_files = [\n ('a/1.ext', b'123456'),\n ('a/2.ext', b'abcd'),\n ('b/c/3.ext', b'9999'),\n ('d/e/5.ext', b'zzzzzzzz'),\n ('d/e/f/g/6.ext', b'yyyyyyyyyyy'),\n ('stuff.txt', b'some stuff'),\n ('a/q/r/file', b'more stuff')]\n\n root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n for path, contents in target_files + extra_files:\n abs_path = os.path.join(root_dir, path)\n tf.gfile.MakeDirs(os.path.dirname(abs_path))\n tf.gfile.GFile(abs_path, mode='w').write(contents)\n\n file_iterator = pipeline.file_iterator(root_dir, 'ext', recurse=False)\n\n self.assertEqual(set(contents for _, contents in target_files),\n set(file_iterator))\n\n def testTFRecordIterator(self):\n tfrecord_file = os.path.join(\n tf.resource_loader.get_data_files_path(),\n '../testdata/tfrecord_iterator_test.tfrecord')\n self.assertEqual(\n [MockStringProto(string)\n for string in [b'hello world', b'12345', b'success']],\n list(pipeline.tf_record_iterator(tfrecord_file, MockStringProto)))\n\n def testRunPipelineSerial(self):\n strings = ['abcdefg', 'helloworld!', 'qwerty']\n root_dir = tempfile.mkdtemp(dir=self.get_temp_dir())\n pipeline.run_pipeline_serial(\n MockPipeline(), iter(strings), 
root_dir)\n\n    dataset_1_dir = os.path.join(root_dir, 'dataset_1.tfrecord')\n    dataset_2_dir = os.path.join(root_dir, 'dataset_2.tfrecord')\n    self.assertTrue(tf.gfile.Exists(dataset_1_dir))\n    self.assertTrue(tf.gfile.Exists(dataset_2_dir))\n\n    dataset_1_reader = tf.python_io.tf_record_iterator(dataset_1_dir)\n    self.assertEqual(\n        set([('serialized:%s_A' % s).encode('utf-8') for s in strings] +\n            [('serialized:%s_B' % s).encode('utf-8') for s in strings]),\n        set(dataset_1_reader))\n\n    dataset_2_reader = tf.python_io.tf_record_iterator(dataset_2_dir)\n    self.assertEqual(\n        set(('serialized:%s_C' % s).encode('utf-8') for s in strings),\n        set(dataset_2_reader))\n\n  def testPipelineIterator(self):\n    strings = ['abcdefg', 'helloworld!', 'qwerty']\n    result = pipeline.load_pipeline(MockPipeline(), iter(strings))\n\n    self.assertEqual(\n        set([MockStringProto(s + '_A') for s in strings] +\n            [MockStringProto(s + '_B') for s in strings]),\n        set(result['dataset_1']))\n    self.assertEqual(\n        set(MockStringProto(s + '_C') for s in strings),\n        set(result['dataset_2']))\n\n  def testPipelineKey(self):\n    # This happens if PipelineKey() is used on a pipeline without a dictionary\n    # output, or the key is not in the output_type dict.\n    pipeline_inst = MockPipeline()\n    pipeline_key = pipeline_inst['dataset_1']\n    self.assertTrue(isinstance(pipeline_key, pipeline.PipelineKey))\n    self.assertEqual(pipeline_key.key, 'dataset_1')\n    self.assertEqual(pipeline_key.unit, pipeline_inst)\n    self.assertEqual(pipeline_key.output_type, MockStringProto)\n    with self.assertRaises(KeyError):\n      _ = pipeline_inst['abc']\n\n    class TestPipeline(pipeline.Pipeline):\n\n      def __init__(self):\n        super(TestPipeline, self).__init__(str, str)\n\n      def transform(self, input_object):\n        pass\n\n    pipeline_inst = TestPipeline()\n    with self.assertRaises(KeyError):\n      _ = pipeline_inst['abc']\n\n    with self.assertRaises(ValueError):\n      _ = pipeline.PipelineKey(1234, 'abc')\n\n  def testInvalidTypeSignatureError(self):\n\n    class PipelineShell(pipeline.Pipeline):\n\n      def transform(self, input_object):\n        pass\n\n    _ = PipelineShell(str, str)\n    _ = PipelineShell({'name': str}, {'name': str})\n\n    good_type = str\n    for bad_type in [123, {1: str}, {'name': 123},\n                     {'name': str, 'name2': 123}, [str, int]]:\n      with self.assertRaises(pipeline.InvalidTypeSignatureError):\n        PipelineShell(bad_type, good_type)\n      with self.assertRaises(pipeline.InvalidTypeSignatureError):\n        PipelineShell(good_type, bad_type)\n\n  def testPipelineGivenName(self):\n\n    class TestPipeline123(pipeline.Pipeline):\n\n      def __init__(self):\n        super(TestPipeline123, self).__init__(str, str, 'TestName')\n        self.stats = []\n\n      def transform(self, input_object):\n        self._set_stats([statistics.Counter('counter_1', 5),\n                         statistics.Counter('counter_2', 10)])\n        return []\n\n    pipe = TestPipeline123()\n    self.assertEqual(pipe.name, 'TestName')\n    pipe.transform('hello')\n    stats = pipe.get_stats()\n    self.assertEqual(\n        set((stat.name, stat.count) for stat in stats),\n        set([('TestName_counter_1', 5), ('TestName_counter_2', 10)]))\n\n  def testPipelineDefaultName(self):\n\n    class TestPipeline123(pipeline.Pipeline):\n\n      def __init__(self):\n        super(TestPipeline123, self).__init__(str, str)\n        self.stats = []\n\n      def transform(self, input_object):\n        self._set_stats([statistics.Counter('counter_1', 5),\n                         statistics.Counter('counter_2', 10)])\n        return []\n\n    pipe = TestPipeline123()\n    self.assertEqual(pipe.name, 'TestPipeline123')\n    pipe.transform('hello')\n    stats = pipe.get_stats()\n    self.assertEqual(\n        set((stat.name, 
stat.count) for stat in stats),\n set([('TestPipeline123_counter_1', 5),\n ('TestPipeline123_counter_2', 10)]))\n\n def testInvalidStatisticsError(self):\n\n class TestPipeline1(pipeline.Pipeline):\n\n def __init__(self):\n super(TestPipeline1, self).__init__(object, object)\n self.stats = []\n\n def transform(self, input_object):\n self._set_stats([statistics.Counter('counter_1', 5), 12345])\n return []\n\n class TestPipeline2(pipeline.Pipeline):\n\n def __init__(self):\n super(TestPipeline2, self).__init__(object, object)\n self.stats = []\n\n def transform(self, input_object):\n self._set_stats(statistics.Counter('counter_1', 5))\n return [input_object]\n\n tp1 = TestPipeline1()\n with self.assertRaises(pipeline.InvalidStatisticsError):\n tp1.transform('hello')\n\n tp2 = TestPipeline2()\n with self.assertRaises(pipeline.InvalidStatisticsError):\n tp2.transform('hello')\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
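A dependency-free sketch (hypothetical, not Magenta's API) of the dict-output contract that MockPipeline exercises above: one input fans out to several named datasets, and a driver accumulates each dataset separately, mirroring what `pipeline.load_pipeline` is asserted to return.

def transform(s):
  # One input string produces two outputs for dataset_1 and one for dataset_2.
  return {'dataset_1': [s + '_A', s + '_B'], 'dataset_2': [s + '_C']}

collected = {'dataset_1': [], 'dataset_2': []}
for s in ['abcdefg', 'helloworld!', 'qwerty']:
  for name, outputs in transform(s).items():
    collected[name].extend(outputs)
assert len(collected['dataset_1']) == 6
assert len(collected['dataset_2']) == 3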
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Defines the GlyphAzznProblem.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl import logging\nfrom magenta.models.svg_vae import svg_utils\nimport numpy as np\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.layers import common_layers\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.utils import registry\nimport tensorflow as tf\n\n# Raw dataset paths (from datagen_beam.py)\n# (Run t2t datagen on GlyphAzznProblem to convert these into a t2t dataset)\nRAW_STAT_FILE = '/path/to/glyphazzn-internal-stats-00000-of-00001'\nRAW_DATA_FILES = '/path/to/glyphazzn-internal-train*'\nURL_SPLITS = 'third_party/py/magenta/models/svg_vae/glyphazzn_urls_split.txt'\n\n\nclass IdentityEncoder(object):\n\n def encode(self, inputs):\n return inputs\n\n def decode(self, inputs):\n return inputs\n\n\[email protected]_problem\nclass GlyphAzznProblem(problem.Problem):\n \"\"\"Defines the GlyphAzznProblem class.\"\"\"\n\n @property\n def dataset_splits(self):\n \"\"\"Data splits to produce and number of shards for each.\"\"\"\n # 10% evaluation data\n return [{\n 'split': problem.DatasetSplit.TRAIN,\n 'shards': 90,\n }, {\n 'split': problem.DatasetSplit.TEST,\n 'shards': 10,\n }]\n\n @property\n def is_generate_per_split(self):\n # the data comes pre-split. 
so we should not shuffle and split it again.\n # this also means generate_samples will be called twice (one per split)\n return True\n\n def has_inputs(self):\n return True\n\n def feature_encoders(self, data_dir):\n del data_dir\n return {\n 'inputs': IdentityEncoder(),\n 'targets': IdentityEncoder()\n }\n\n def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):\n # ignore any encoding since we don't need that\n return self.generate_samples(data_dir, tmp_dir, dataset_split)\n\n def generate_data(self, data_dir, tmp_dir, task_id=-1):\n filepath_fns = {\n problem.DatasetSplit.TRAIN: self.training_filepaths,\n problem.DatasetSplit.EVAL: self.dev_filepaths,\n problem.DatasetSplit.TEST: self.test_filepaths,\n }\n\n split_paths = [(split['split'], filepath_fns[split['split']](\n data_dir, split['shards'], shuffled=False))\n for split in self.dataset_splits]\n all_paths = []\n for _, paths in split_paths:\n all_paths.extend(paths)\n\n if self.is_generate_per_split:\n for split, paths in split_paths:\n generator_utils.generate_files(\n self.generate_encoded_samples(data_dir, tmp_dir, split), paths)\n else:\n generator_utils.generate_files(\n self.generate_encoded_samples(\n data_dir, tmp_dir, problem.DatasetSplit.TRAIN), all_paths)\n\n generator_utils.shuffle_dataset(all_paths)\n\n @property\n def categorical(self):\n # indicates we're using one-hot categories for command type.\n return True\n\n @property\n def feature_dim(self):\n return 10\n\n @property\n def num_classes(self):\n return 30\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n \"\"\"Generate samples of target svg commands.\"\"\"\n if not hasattr(self, 'splits'):\n tf.logging.info(\n 'Loading binary_fp: train/test from {}'.format(URL_SPLITS))\n self.splits = {}\n for line in tf.gfile.Open(URL_SPLITS, 'r').read().split('\\n'):\n if line:\n line = line.split(', ')\n self.splits[line[0]] = line[1]\n\n if not tf.gfile.Exists(data_dir):\n tf.gfile.MakeDirs(data_dir)\n\n if not tf.gfile.Exists(os.path.join(data_dir, 'mean.npz')):\n # FIRST, COPY THE MEAN/STDEV INTO DATA_DIR, in npz format\n for serialized_stats in tf.python_io.tf_record_iterator(RAW_STAT_FILE):\n stats = tf.train.Example()\n stats.ParseFromString(serialized_stats)\n mean = np.array(stats.features.feature['mean'].float_list.value)\n stdev = np.array(stats.features.feature['stddev'].float_list.value)\n # also want to set mean[:4] to zeros and stdev[:4] to ones, because\n # these are the class labels\n mean = np.concatenate((np.zeros([4]), mean[4:]), axis=0)\n stdev = np.concatenate((np.ones([4]), stdev[4:]), axis=0)\n # finally, save\n np.save(tf.gfile.Open(os.path.join(data_dir, 'mean.npz'), 'w'), mean)\n np.save(tf.gfile.Open(os.path.join(data_dir, 'stdev.npz'), 'w'), stdev)\n logging.info('Generated mean and stdev npzs')\n\n for raw_data_file in tf.gfile.Glob(RAW_DATA_FILES):\n for serialized_example in tf.python_io.tf_record_iterator(raw_data_file):\n example = tf.train.Example()\n example.ParseFromString(serialized_example)\n\n # determing whether this example belongs to a fontset in train or test\n this_bfp = str(\n example.features.feature['binary_fp'].bytes_list.value[0])\n if this_bfp not in self.splits:\n # randomly sample 10% to be test, the rest is train\n should_be_test = np.random.random() < 0.1\n self.splits[this_bfp] = 'test' if should_be_test else 'train'\n\n if self.splits[this_bfp] != dataset_split:\n continue\n\n yield {\n 'targets_sln': np.array(\n example.features.feature['seq_len'].int64_list.value).astype(\n 
np.int64).tolist(),\n 'targets_cls': np.array(\n example.features.feature['class'].int64_list.value).astype(\n np.int64).tolist(),\n 'targets_rel': np.array(\n example.features.feature['sequence'].float_list.value).astype(\n np.float32).tolist(),\n 'targets_rnd': np.array(\n example.features.feature['rendered'].float_list.value).astype(\n np.float32).tolist()\n }\n\n def example_reading_spec(self):\n data_fields = {'targets_rel': tf.FixedLenFeature([51*10], tf.float32),\n 'targets_rnd': tf.FixedLenFeature([64*64], tf.float32),\n 'targets_sln': tf.FixedLenFeature([1], tf.int64),\n 'targets_cls': tf.FixedLenFeature([1], tf.int64)}\n\n data_items_to_decoders = None\n return (data_fields, data_items_to_decoders)\n\n def preprocess_example(self, example, unused_mode, hparams):\n \"\"\"Time series are flat on disk, we un-flatten them back here.\"\"\"\n if not hasattr(self, 'mean_npz'):\n mean_filename = os.path.join(hparams.data_dir, 'mean.npz')\n stdev_filename = os.path.join(hparams.data_dir, 'stdev.npz')\n with tf.gfile.Open(mean_filename, 'r') as f:\n self.mean_npz = np.load(f)\n with tf.gfile.Open(stdev_filename, 'r') as f:\n self.stdev_npz = np.load(f)\n\n example['targets_cls'] = tf.reshape(example['targets_cls'], [1])\n example['targets_sln'] = tf.reshape(example['targets_sln'], [1])\n\n example['targets_rel'] = tf.reshape(example['targets_rel'], [51, 1, 10])\n # normalize (via gaussian)\n example['targets_rel'] = (example['targets_rel'] -\n self.mean_npz) / self.stdev_npz\n\n # redefine shape inside model!\n example['targets_psr'] = tf.reshape(example['targets_rnd'],\n [1, 64 * 64]) / 255.\n del example['targets_rnd']\n\n if hparams.just_render:\n # training vae mode, use the last image (rendered icon) as input & output\n example['inputs'] = example['targets_psr'][-1, :]\n example['targets'] = example['targets_psr'][-1, :]\n else:\n example['inputs'] = tf.identity(example['targets_rel'])\n example['targets'] = tf.identity(example['targets_rel'])\n\n return example\n\n def hparams(self, defaults, model_hparams):\n p = defaults\n p.stop_at_eos = int(False)\n p.vocab_size = {'inputs': self.feature_dim, 'targets': self.feature_dim}\n p.modality = {'inputs': modalities.ModalityType.IDENTITY,\n 'targets': modalities.ModalityType.IDENTITY}\n\n @property\n def decode_hooks(self):\n to_img = svg_utils.create_image_conversion_fn(\n 1, categorical=self.categorical)\n\n def sample_image(decode_hook_args):\n \"\"\"Converts decoded predictions into summaries.\"\"\"\n hparams = decode_hook_args.hparams\n\n if not hasattr(self, 'mean_npz'):\n mean_filename = os.path.join(hparams.data_dir, 'mean.npz')\n stdev_filename = os.path.join(hparams.data_dir, 'stdev.npz')\n with tf.gfile.open(mean_filename, 'r') as f:\n self.mean_npz = np.load(f)\n with tf.gfile.open(stdev_filename, 'r') as f:\n self.stdev_npz = np.load(f)\n\n values = []\n for pred_dict in decode_hook_args.predictions[0]:\n if hparams.just_render:\n # vae mode, outputs is image, just do image summary and continue\n values.append(svg_utils.make_image_summary(\n pred_dict['outputs'], 'rendered_outputs'))\n values.append(svg_utils.make_image_summary(\n pred_dict['targets'], 'rendered_targets'))\n continue\n\n if common_layers.shape_list(pred_dict['targets'])[0] == 1:\n continue\n\n # undo normalize (via gaussian)\n denorm_outputs = (pred_dict['outputs'] * self.stdev_npz) + self.mean_npz\n denorm_targets = (pred_dict['targets'] * self.stdev_npz) + self.mean_npz\n\n # simple cmds are 10 dim (4 one-hot, 6 args).\n # Convert to full SVG spec 
dimensionality so we can convert it to text.\n denorm_outputs = svg_utils.make_simple_cmds_long(denorm_outputs)\n denorm_targets = svg_utils.make_simple_cmds_long(denorm_targets)\n\n # sampled text summary\n output_svg = to_img([np.reshape(denorm_outputs, [-1, 30])])\n values.append(svg_utils.make_text_summary_value(output_svg,\n 'img/sampled'))\n\n # original text summary\n target_svg = to_img([np.reshape(denorm_targets, [-1, 30])])\n values.append(svg_utils.make_text_summary_value(target_svg, 'img/og'))\n\n return values\n return [sample_image]\n\n def eval_metrics(self):\n return []\n",
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for performance_rnn_create_dataset.\"\"\"\n\nimport magenta\nfrom magenta.models.performance_rnn import performance_model\nfrom magenta.pipelines import performance_pipeline\nfrom magenta.protobuf import music_pb2\nimport tensorflow as tf\n\nFLAGS = tf.app.flags.FLAGS\n\n\nclass PerformancePipelineTest(tf.test.TestCase):\n\n def setUp(self):\n super(PerformancePipelineTest, self).setUp()\n self.config = performance_model.PerformanceRnnConfig(\n None,\n magenta.music.OneHotEventSequenceEncoderDecoder(\n magenta.music.PerformanceOneHotEncoding()),\n tf.contrib.training.HParams())\n\n def testPerformanceRnnPipeline(self):\n note_sequence = music_pb2.NoteSequence()\n magenta.music.testing_lib.add_track_to_sequence(\n note_sequence, 0,\n [(36, 100, 0.00, 2.0), (40, 55, 2.1, 5.0), (44, 80, 3.6, 5.0),\n (41, 45, 5.1, 8.0), (64, 100, 6.6, 10.0), (55, 120, 8.1, 11.0),\n (39, 110, 9.6, 9.7), (53, 99, 11.1, 14.1), (51, 40, 12.6, 13.0),\n (55, 100, 14.1, 15.0), (54, 90, 15.6, 17.0), (60, 100, 17.1, 18.0)])\n\n pipeline_inst = performance_pipeline.get_pipeline(\n min_events=32,\n max_events=512,\n eval_ratio=0,\n config=self.config)\n result = pipeline_inst.transform(note_sequence)\n self.assertTrue(len(result['training_performances']))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Copyright 2019 The Magenta Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"MusicVAE generation script.\"\"\"\n\n# TODO(adarob): Add support for models with conditioning.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport sys\nimport time\n\nfrom magenta import music as mm\nfrom magenta.models.music_vae import configs\nfrom magenta.models.music_vae import TrainedModel\nimport numpy as np\nimport tensorflow as tf\n\nflags = tf.app.flags\nlogging = tf.logging\nFLAGS = flags.FLAGS\n\nflags.DEFINE_string(\n 'run_dir', None,\n 'Path to the directory where the latest checkpoint will be loaded from.')\nflags.DEFINE_string(\n 'checkpoint_file', None,\n 'Path to the checkpoint file. run_dir will take priority over this flag.')\nflags.DEFINE_string(\n 'output_dir', '/tmp/music_vae/generated',\n 'The directory where MIDI files will be saved to.')\nflags.DEFINE_string(\n 'config', None,\n 'The name of the config to use.')\nflags.DEFINE_string(\n 'mode', 'sample',\n 'Generate mode (either `sample` or `interpolate`).')\nflags.DEFINE_string(\n 'input_midi_1', None,\n 'Path of start MIDI file for interpolation.')\nflags.DEFINE_string(\n 'input_midi_2', None,\n 'Path of end MIDI file for interpolation.')\nflags.DEFINE_integer(\n 'num_outputs', 5,\n 'In `sample` mode, the number of samples to produce. In `interpolate` '\n 'mode, the number of steps (including the endpoints).')\nflags.DEFINE_integer(\n 'max_batch_size', 8,\n 'The maximum batch size to use. 
Decrease if you are seeing an OOM.')\nflags.DEFINE_float(\n 'temperature', 0.5,\n 'The randomness of the decoding process.')\nflags.DEFINE_string(\n 'log', 'INFO',\n 'The threshold for what messages will be logged: '\n 'DEBUG, INFO, WARN, ERROR, or FATAL.')\n\n\ndef _slerp(p0, p1, t):\n \"\"\"Spherical linear interpolation.\"\"\"\n omega = np.arccos(\n np.dot(np.squeeze(p0/np.linalg.norm(p0)),\n np.squeeze(p1/np.linalg.norm(p1))))\n so = np.sin(omega)\n return np.sin((1.0-t)*omega) / so * p0 + np.sin(t*omega)/so * p1\n\n\ndef run(config_map):\n \"\"\"Load model params, save config file and start trainer.\n\n Args:\n config_map: Dictionary mapping configuration name to Config object.\n\n Raises:\n ValueError: if required flags are missing or invalid.\n \"\"\"\n date_and_time = time.strftime('%Y-%m-%d_%H%M%S')\n\n if FLAGS.run_dir is None == FLAGS.checkpoint_file is None:\n raise ValueError(\n 'Exactly one of `--run_dir` or `--checkpoint_file` must be specified.')\n if FLAGS.output_dir is None:\n raise ValueError('`--output_dir` is required.')\n tf.gfile.MakeDirs(FLAGS.output_dir)\n if FLAGS.mode != 'sample' and FLAGS.mode != 'interpolate':\n raise ValueError('Invalid value for `--mode`: %s' % FLAGS.mode)\n\n if FLAGS.config not in config_map:\n raise ValueError('Invalid config name: %s' % FLAGS.config)\n config = config_map[FLAGS.config]\n config.data_converter.max_tensors_per_item = None\n\n if FLAGS.mode == 'interpolate':\n if FLAGS.input_midi_1 is None or FLAGS.input_midi_2 is None:\n raise ValueError(\n '`--input_midi_1` and `--input_midi_2` must be specified in '\n '`interpolate` mode.')\n input_midi_1 = os.path.expanduser(FLAGS.input_midi_1)\n input_midi_2 = os.path.expanduser(FLAGS.input_midi_2)\n if not os.path.exists(input_midi_1):\n raise ValueError('Input MIDI 1 not found: %s' % FLAGS.input_midi_1)\n if not os.path.exists(input_midi_2):\n raise ValueError('Input MIDI 2 not found: %s' % FLAGS.input_midi_2)\n input_1 = mm.midi_file_to_note_sequence(input_midi_1)\n input_2 = mm.midi_file_to_note_sequence(input_midi_2)\n\n def _check_extract_examples(input_ns, path, input_number):\n \"\"\"Make sure each input returns exactly one example from the converter.\"\"\"\n tensors = config.data_converter.to_tensors(input_ns).outputs\n if not tensors:\n print(\n 'MusicVAE configs have very specific input requirements. Could not '\n 'extract any valid inputs from `%s`. Try another MIDI file.' % path)\n sys.exit()\n elif len(tensors) > 1:\n basename = os.path.join(\n FLAGS.output_dir,\n '%s_input%d-extractions_%s-*-of-%03d.mid' %\n (FLAGS.config, input_number, date_and_time, len(tensors)))\n for i, ns in enumerate(config.data_converter.to_notesequences(tensors)):\n mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))\n print(\n '%d valid inputs extracted from `%s`. Outputting these potential '\n 'inputs as `%s`. Call script again with one of these instead.' 
%\n (len(tensors), path, basename))\n sys.exit()\n logging.info(\n 'Attempting to extract examples from input MIDIs using config `%s`...',\n FLAGS.config)\n _check_extract_examples(input_1, FLAGS.input_midi_1, 1)\n _check_extract_examples(input_2, FLAGS.input_midi_2, 2)\n\n logging.info('Loading model...')\n if FLAGS.run_dir:\n checkpoint_dir_or_path = os.path.expanduser(\n os.path.join(FLAGS.run_dir, 'train'))\n else:\n checkpoint_dir_or_path = os.path.expanduser(FLAGS.checkpoint_file)\n model = TrainedModel(\n config, batch_size=min(FLAGS.max_batch_size, FLAGS.num_outputs),\n checkpoint_dir_or_path=checkpoint_dir_or_path)\n\n if FLAGS.mode == 'interpolate':\n logging.info('Interpolating...')\n _, mu, _ = model.encode([input_1, input_2])\n z = np.array([\n _slerp(mu[0], mu[1], t) for t in np.linspace(0, 1, FLAGS.num_outputs)])\n results = model.decode(\n length=config.hparams.max_seq_len,\n z=z,\n temperature=FLAGS.temperature)\n elif FLAGS.mode == 'sample':\n logging.info('Sampling...')\n results = model.sample(\n n=FLAGS.num_outputs,\n length=config.hparams.max_seq_len,\n temperature=FLAGS.temperature)\n\n basename = os.path.join(\n FLAGS.output_dir,\n '%s_%s_%s-*-of-%03d.mid' %\n (FLAGS.config, FLAGS.mode, date_and_time, FLAGS.num_outputs))\n logging.info('Outputting %d files as `%s`...', FLAGS.num_outputs, basename)\n for i, ns in enumerate(results):\n mm.sequence_proto_to_midi_file(ns, basename.replace('*', '%03d' % i))\n\n logging.info('Done.')\n\n\ndef main(unused_argv):\n logging.set_verbosity(FLAGS.log)\n run(configs.CONFIG_MAP)\n\n\ndef console_entry_point():\n tf.app.run(main)\n\n\nif __name__ == '__main__':\n console_entry_point()\n"
] | [
[
"tensorflow.global_variables_initializer",
"tensorflow.test.main",
"tensorflow.random_normal"
],
[
"tensorflow.name_scope",
"numpy.float32",
"tensorflow.reduce_mean"
],
[
"numpy.log",
"numpy.tile",
"tensorflow.logging.info",
"numpy.array",
"tensorflow.contrib.training.HParams"
],
[
"tensorflow.Graph",
"tensorflow.flags.mark_flags_as_required",
"tensorflow.gfile.Open",
"tensorflow.gfile.Exists",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.logging.error",
"tensorflow.gfile.MakeDirs",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.logging.info",
"tensorflow.Session",
"tensorflow.app.flags.DEFINE_boolean",
"numpy.random.RandomState"
],
[
"tensorflow.test.main"
],
[
"numpy.load",
"tensorflow.resource_loader.get_data_files_path",
"tensorflow.gfile.Open"
],
[
"tensorflow.resource_loader.get_data_files_path",
"tensorflow.gfile.Exists",
"tensorflow.gfile.GFile",
"tensorflow.test.main",
"tensorflow.python_io.tf_record_iterator"
],
[
"numpy.random.random",
"tensorflow.FixedLenFeature",
"tensorflow.train.Example",
"tensorflow.gfile.Exists",
"tensorflow.gfile.Open",
"numpy.reshape",
"tensorflow.reshape",
"tensorflow.identity",
"numpy.ones",
"tensorflow.gfile.MakeDirs",
"tensorflow.gfile.Glob",
"tensorflow.python_io.tf_record_iterator",
"numpy.load",
"numpy.array",
"numpy.zeros",
"tensorflow.gfile.open"
],
[
"tensorflow.contrib.training.HParams",
"tensorflow.test.main"
],
[
"numpy.linspace",
"numpy.linalg.norm",
"numpy.sin",
"tensorflow.gfile.MakeDirs",
"tensorflow.app.run"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
paxtonedgar/MisInfo | [
"81b32fa3e7d0d204feb83e10169093f45727a2ea",
"81b32fa3e7d0d204feb83e10169093f45727a2ea"
] | [
"src/trainers/lstm_attn_trainer.py",
"src/models/bnb.py"
] | [
"\nimport torch\n\nimport numpy as np\n\nfrom src.trainers.base_trainer import BaseTrainer\nfrom src.evaluation.metrics import Metrics\n\n\nclass LSTMAttnTrainer(BaseTrainer):\n \"\"\"\n Trainer class. Optimizer is by default handled by BaseTrainer.\n \"\"\"\n def __init__(self, model, config):\n super(LSTMAttnTrainer, self).__init__(model, config)\n self._log_interval = config['log_interval']\n self._batch_size = config['dataloader_params']['batch_size']\n self._logger.info('Batch size: %d', self._batch_size)\n\n def _train_epoch(self, epoch, train_iter, dev_iter):\n \"\"\"\n :param epoch:\n :param train_iter:\n :param dev_iter:\n :return:\n \"\"\"\n # turn on training mode which enables dropout\n self._model.train()\n\n total_loss = 0\n predicted_values = []\n target_values = []\n\n labels = np.arange(self._model.num_classes)\n\n for batch_idx, batch in enumerate(train_iter):\n (data, lengths), target = self._to_tensor(batch.text, batch.label)\n\n self._optimizer.zero_grad()\n output, attn_w = self._model(data, lengths)\n # output = self._model(data, lengths)\n loss = self._loss_function(output, target, reduction='sum')\n loss.backward()\n self._optimizer.step()\n\n total_loss += loss.item()\n\n predictions = torch.max(output, 1)[1].view(target.size())\n predicted_values.extend(predictions.data.tolist())\n target_values.extend(target.data.tolist())\n\n if (batch_idx + 1) % self._log_interval == 0:\n results = Metrics.metrics(\n predicted_values, target_values, labels\n )\n self._logger.info(\n 'Epoch: {:3d} [{:5d}/{:5.0f} batches] '\n 'Current loss: {:5.6f}, Total average loss: {:5.6f}, '\n 'F-score: {:5.2f}'.format(\n epoch, (batch_idx + 1),\n len(train_iter.dataset) / self._batch_size,\n loss.item() / self._batch_size,\n total_loss / results['n_samples'],\n results['f_score']\n )\n )\n\n results_train = Metrics.metrics(predicted_values, target_values, labels)\n results_train['loss'] = total_loss / results_train['n_samples']\n results_val, _ = self.evaluate(dev_iter)\n\n log = {'epoch': epoch}\n log.update({'train_{}'.format(k): v for k, v in results_train.items()})\n log.update({'val_{}'.format(k): v for k, v in results_val.items()})\n\n return log\n\n def evaluate(self, data_iter):\n \"\"\"\n Validate after training an epoch\n :param data_iter:\n :return:\n \"\"\"\n # switch to evaluation mode (won't dropout)\n self._model.eval()\n\n total_loss = 0\n predicted_values = []\n target_values = []\n\n labels = np.arange(self._model.num_classes)\n\n with torch.no_grad():\n for batch_idx, batch in enumerate(data_iter):\n (data, lengths), target = self._to_tensor(\n batch.text, batch.label\n )\n\n output, attn_w = self._model(data, lengths)\n # output = self._model(data, lengths)\n loss = self._loss_function(output, target, reduction='sum')\n\n total_loss += loss.item()\n\n predictions = torch.max(output, 1)[1].view(target.size())\n predicted_values.extend(predictions.data.tolist())\n target_values.extend(target.data.tolist())\n\n results = Metrics.metrics(predicted_values, target_values, labels)\n results['loss'] = total_loss / results['n_samples']\n\n self._logger.info(\n 'Evaluation: Loss: {:5.6f}, F-score: {:5.2f}% ({}/{})'.format(\n results['loss'], results['f_score'],\n results['correct'], results['n_samples']\n )\n )\n\n return results, predicted_values\n",
"\"\"\"\nBernoulli Naive Bayes.\n\"\"\"\n\nimport logging\nfrom typing import Dict, Union, Any\n\nimport numpy as np\nfrom sklearn.naive_bayes import BernoulliNB\n\nfrom src.evaluation.metrics import Metrics\nfrom src.models.base_model import BaseModel, DataMatrix, LabelVector\n\n\nclass BNB(BernoulliNB, BaseModel):\n \"\"\"\n Bernoulli Naive Bayes.\n \"\"\"\n\n def __init__(self, model_cfg: Dict[str, Any] = None) -> None:\n \"\"\"Initialize class, pass config to parent\n\n :param model_cfg: model config\n :type model_cfg: Dict[str, Any], default None\n :return: None\n :rtype: None\n \"\"\"\n if not model_cfg:\n model_cfg = {}\n super(BNB, self).__init__(**model_cfg)\n self._logger = logging.getLogger(__name__)\n\n def train_test(\n self, X_train: DataMatrix, X_test: DataMatrix,\n y_train: LabelVector, y_test: LabelVector\n ) -> Dict[str, Union[int, float]]:\n \"\"\"Train a dummy classifier and return predictions on test data.\n\n :param X_train: numpy array of shape [n_train_samples, n_features]\n :type X_train: DataMatrix\n :param X_test: numpy array of shape [n_test_samples, n_features]\n :type X_test: DataMatrix\n :param y_train: numpy array of shape [n_train_samples]\n :type y_train: LabelVector\n :param y_test: numpy array of shape [n_test_samples]\n :type y_test: LabelVector\n :return: performance metrics\n :rtype: Dict[str, Union[int, float]]\n \"\"\"\n y_train = np.array(y_train)\n y_test = np.array(y_test)\n labels = list(set(y_train).union(set(y_test)))\n\n # fit\n self._logger.info('Fitting %s classifier to data', __name__)\n self.fit(X_train, y_train)\n\n # predict\n self._logger.info('Done fitting to data, obtaining predictions')\n pred_train = self.predict(X_train)\n pred_test = self.predict(X_test)\n results = {\n f'train_{k}': v for k, v in\n Metrics.metrics(pred_train, y_train, labels).items()\n }\n results.update({\n f'test_{k}': v for k, v in\n Metrics.metrics(pred_test, y_test, labels).items()\n })\n self._logger.info('Done testing %s', __name__)\n return results\n"
] | [
[
"numpy.arange",
"torch.no_grad",
"torch.max"
],
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RubenImhoff/Large_Sample_Nowcasting_Evaluation | [
"b2d8500261881a749a8f20815b7e2b0b9b69c4f7",
"b2d8500261881a749a8f20815b7e2b0b9b69c4f7",
"b2d8500261881a749a8f20815b7e2b0b9b69c4f7",
"b2d8500261881a749a8f20815b7e2b0b9b69c4f7",
"b2d8500261881a749a8f20815b7e2b0b9b69c4f7"
] | [
"HPCrunScripts/PS_DeterministicNowcast_parallel_advection_24h.py",
"pysteps/pysteps/extrapolation/semilagrangian.py",
"pysteps/build/lib/pysteps/motion/vet.py",
"pysteps/pysteps/noise/utils.py",
"pysteps/pysteps/verification/detcatscores.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 17 07:41:32 2019\n\nDeterministic nowcast with pySTEPS, with extraction of results per catchment. \nBased on the input data for the Ensemble nowcast, but without any ensembles. \n\nMake sure to change the initial part to your case.\n\nNote that this script assumes that the catchments are already reprojected.\n\nTO DO - add _reprojected to input and change this later on in the script.\n\n@author: imhof_rn\n\"\"\"\n\nfrom osgeo import gdal\nfrom osgeo import gdal_array\nfrom osgeo import ogr, osr\n\nimport os\nos.environ['PROJ_LIB'] = r'/u/imhof_rn/anaconda3/pkgs/proj4-5.2.0-h470a237_1/share/proj'\n\nimport mkl\nmkl.set_num_threads(1)\n\nimport datetime\nimport netCDF4\nimport numpy as np\nimport pprint\nimport sys\nimport time\n\nimport pysteps as stp\nimport config as cfg\n\nimport logging\nimport itertools\n\nlogging.basicConfig(level=logging.INFO)\n\n# import message passing interface for python\nfrom mpi4py import MPI\n\n# import for memory use\n#from pympler import tracker\n#tr = tracker.SummaryTracker()\n#tr.print_diff() \n\n###############################################################################\n#################\n# Initial part, only change this\n# NOTE: This script only works when the catchment shapefiles are already reprojected\n# to the KNMI radar dataset.\n#################\n\nos.chdir('/u/imhof_rn/pysteps-0.2')\n\n# Catchment filenames and directories\ncatchments = True # Put on false when you don't want any slicing for catchments (i.e. you will use the full output)\n# If catchments = 'False', uncomment the next two lines.\ncatchment_filenames = [\"/u/imhof_rn/GIS/Catchments_pysteps/Hupsel.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/stroomgebied_Regge.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/GroteWaterleiding.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Aa.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Reusel.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/het_molentje.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Luntersebeek.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Dwarsdiep.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/AfwaterendgebiedBoezemsysteem.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/HHRijnland.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/Beemster.shp\", \"/u/imhof_rn/GIS/Catchments_pysteps/DeLinde.shp\"] # Put here the locations of the shapefiles\ncatchment_names = ['Hupsel', 'Regge', 'GroteWaterleiding', 'Aa', 'Reusel', 'Molentje', 'Luntersebeek', 'Dwarsdiep', 'Delfland', 'Rijnland', 'Beemster', 'Linde'] # A list of catchment names.\nout_dir = \"/u/imhof_rn/Nowcasts/pySTEPS\" # Just used for logging, the actual\n# out_dir is set in the pystepsrc-file.\n\n# Verification settings\nverification = {\n \"experiment_name\" : \"pysteps_mpi_24hours_deterministic\",\n \"overwrite\" : True, # to recompute nowcasts\n \"v_thresholds\" : [0.1, 1.0], # [mm/h] \n \"v_leadtimes\" : [10, 30, 60, 90, 120, 150, 180, 210, 240, 270, 300, 330, 360], # [min]\n \"v_accu\" : None, # [min]\n \"seed\" : 42, # for reproducibility\n \"doplot\" : True, # save figures\n \"dosaveresults\" : True # save verification scores to csv\n}\n\n# Forecast settings\nforecast = {\n \"n_lead_times\" : 72, # timesteps per nowcast\n \"r_threshold\" : 0.1, # rain/no rain threshold [mm/h]\n \"unit\" : \"mm/h\", # mm/h or dBZ\n \"transformation\" : \"dB\", # None or dB \n \"adjust_domain\" : None # None or square\n}\n\n# The experiment set-up\n## this includes tuneable parameters\nexperiment = {\n ## the events event start event end update cycle data source\n 
\"data\" : [(\"200801101205\",\"200801111800\",5,\"knmi\"),\n (\"200801190305\",\"200801200900\",5,\"knmi\"),\n (\"200801191005\",\"200801201600\",5,\"knmi\"),\n (\"200801201705\",\"200801212300\",5,\"knmi\"),\n (\"200802042305\",\"200802060500\",5,\"knmi\"),\n (\"200807070605\",\"200807081200\",5,\"knmi\"),\n (\"200808070405\",\"200808081000\",5,\"knmi\"),\n (\"200812100305\",\"200812110900\",5,\"knmi\"),\n (\"200902091005\",\"200902101600\",5,\"knmi\"),\n (\"200905131705\",\"200905142300\",5,\"knmi\"),\n (\"200905161005\",\"200905171600\",5,\"knmi\"),\n (\"200912091805\",\"200912110000\",5,\"knmi\"),\n (\"201005110005\",\"201005120600\",5,\"knmi\"),\n (\"201006090205\",\"201006100800\",5,\"knmi\"),\n (\"201007101005\",\"201007111600\",5,\"knmi\"),\n (\"201007101105\",\"201007111700\",5,\"knmi\"),\n (\"201008251605\",\"201008262200\",5,\"knmi\"),\n (\"201008252105\",\"201008270300\",5,\"knmi\"),\n (\"201008252205\",\"201008270400\",5,\"knmi\"),\n (\"201008252305\",\"201008270500\",5,\"knmi\"),\n (\"201101120405\",\"201101131000\",5,\"knmi\"),\n (\"201106180405\",\"201106191000\",5,\"knmi\"),\n (\"201107131805\",\"201107150000\",5,\"knmi\"),\n (\"201107210105\",\"201107220700\",5,\"knmi\"),\n (\"201107231105\",\"201107241700\",5,\"knmi\"),\n (\"201107271805\",\"201107290000\",5,\"knmi\"),\n (\"201112151205\",\"201112161800\",5,\"knmi\"),\n (\"201112151305\",\"201112161900\",5,\"knmi\"),\n (\"201112311805\",\"201201020000\",5,\"knmi\"),\n (\"201112312105\",\"201201020300\",5,\"knmi\"),\n (\"201201010905\",\"201201021500\",5,\"knmi\"),\n (\"201201041205\",\"201201051800\",5,\"knmi\"),\n (\"201206120205\",\"201206130800\",5,\"knmi\"),\n (\"201207271505\",\"201207282100\",5,\"knmi\"),\n (\"201208010605\",\"201208021200\",5,\"knmi\"),\n (\"201212220305\",\"201212230900\",5,\"knmi\"),\n (\"201212220505\",\"201212231100\",5,\"knmi\"),\n (\"201212241705\",\"201212252300\",5,\"knmi\"),\n (\"201305200605\",\"201305211200\",5,\"knmi\"),\n (\"201312232205\",\"201312250400\",5,\"knmi\"),\n (\"201407080605\",\"201407091200\",5,\"knmi\"),\n (\"201407101205\",\"201407111800\",5,\"knmi\"),\n (\"201407270605\",\"201407281200\",5,\"knmi\"),\n (\"201407271905\",\"201407290100\",5,\"knmi\"),\n (\"201407280605\",\"201407291200\",5,\"knmi\"),\n (\"201412110705\",\"201412121300\",5,\"knmi\"),\n (\"201412110805\",\"201412121400\",5,\"knmi\"),\n (\"201412111205\",\"201412121800\",5,\"knmi\"),\n (\"201412261705\",\"201412272300\",5,\"knmi\"),\n (\"201501071705\",\"201501082300\",5,\"knmi\"),\n (\"201501120805\",\"201501131400\",5,\"knmi\"),\n (\"201501121005\",\"201501131600\",5,\"knmi\"),\n (\"201501121105\",\"201501131700\",5,\"knmi\"),\n (\"201502200805\",\"201502211400\",5,\"knmi\"),\n (\"201508160405\",\"201508171000\",5,\"knmi\"),\n (\"201511292305\",\"201512010500\",5,\"knmi\"),\n (\"201511300205\",\"201512010800\",5,\"knmi\"),\n (\"201601131405\",\"201601142000\",5,\"knmi\"),\n (\"201601291405\",\"201601302000\",5,\"knmi\"),\n (\"201602081205\",\"201602091800\",5,\"knmi\"),\n (\"201602081305\",\"201602091900\",5,\"knmi\"),\n (\"201603040205\",\"201603050800\",5,\"knmi\"),\n (\"201605220405\",\"201605231000\",5,\"knmi\"),\n (\"201605221505\",\"201605232100\",5,\"knmi\"),\n (\"201605312105\",\"201606020300\",5,\"knmi\"),\n (\"201605312305\",\"201606020500\",5,\"knmi\"),\n (\"201606031605\",\"201606042200\",5,\"knmi\"),\n (\"201607210705\",\"201607221300\",5,\"knmi\"),\n (\"201701120505\",\"201701131100\",5,\"knmi\"),\n (\"201701120805\",\"201701131400\",5,\"knmi\"),\n 
(\"201701121105\",\"201701131700\",5,\"knmi\"),\n (\"201702212105\",\"201702230300\",5,\"knmi\"),\n (\"201706271405\",\"201706282000\",5,\"knmi\"),\n (\"201707231505\",\"201707242100\",5,\"knmi\"),\n (\"201708100005\",\"201708110600\",5,\"knmi\"),\n (\"201708291205\",\"201708301800\",5,\"knmi\"),\n (\"201708291605\",\"201708302200\",5,\"knmi\"),\n (\"201712080205\",\"201712090800\",5,\"knmi\"),\n (\"201712130805\",\"201712141400\",5,\"knmi\"),\n (\"201712301705\",\"201712312300\",5,\"knmi\"),\n (\"201805310605\",\"201806011200\",5,\"knmi\"),\n (\"201812081205\",\"201812091800\",5,\"knmi\")],\n \n ## the methods\n \"oflow_method\" : [\"lucaskanade\"], # lucaskanade, darts\n \"adv_method\" : [\"semilagrangian\"], # semilagrangian, eulerian\n \"nwc_method\" : [\"extrapolation\"],\n \"noise_method\" : [None], # parametric, nonparametric, ssft\n \"decomp_method\" : [\"fft\"],\n \n ## the parameters\n \"n_ens_members\" : [1],\n \"ar_order\" : [2],\n \"n_cascade_levels\" : [8],\n \"noise_adjustment\" : [False],\n \"conditional\" : [False],\n \"precip_mask\" : [True],\n \"mask_method\" : [\"sprog\"], # obs, incremental, sprog\n \"prob_matching\" : [\"mean\"],\n \"num_workers\" : [1], # Set the number of processors available for parallel computing\n \"vel_pert_method\" : [None], # No velocity pertubation in order to allow for deterministic run following Seed et al. [2003]\n}\n\n# End of initial part\n###############################################################################\n\nstart_time = time.time()\n\n#### HERE ALL AVAILABLE PROCESSES AT START-UP TIME ARE COLLECTED IN comm\n#### SEE FOR MORE INFO ON MPI: https://www.cs.earlham.edu/~lemanal/slides/mpi-slides.pdf \ncomm = MPI.COMM_WORLD\nrank = comm.rank\nsize = comm.size\n\nlogging.info(('I am process rank {}'.format(rank)))\n\n#########################################################\n# Open the catchment shapes - They're needed later for the catchment_slice utils\n#########################################################\nshapes = []\n\nfor i in range(0, len(catchment_filenames)):\n shape_filename = catchment_filenames[i]\n \n # set file names in order to obtain the reprojected shapefile, which \n # was made with the catchment_medata functionality.\n dirname = os.path.dirname(shape_filename)\n basename = os.path.basename(shape_filename)\n basenametxt = os.path.splitext(basename)[0]\n shapes_reprojected = os.path.join(dirname, basenametxt+'_Reprojected.shp')\t\n \n driver = ogr.GetDriverByName('ESRI Shapefile')\n shapes.append(driver.Open(shapes_reprojected))\n\n###########\n# Set some first functions\n###########\n\n## define the callback function to export the nowcast to netcdf\nconverter = stp.utils.get_method(\"mm/h\")\ndef export(X_3D):\n \"\"\"\n X_3D 3D forecast consisting of (lead time, h, w)\n \"\"\"\n\n ## Open the array for lead time t and convert to mm/h\n X,_ = converter(X_3D, metadata)\n # readjust to initial domain shape\n X,_ = reshaper(X, metadata, inverse=True)\n\n # Then, slice the array per catchment or not if no catchments are given\n if catchments == True:\n X_catchment = stp.utils.catchment_slice_mpi(X, shapes)\n # Export to netCDF per catchment\n for n in range(0, len(catchment_filenames)):\n key = list(d.keys())[n]\n stp.io.export_forecast_dataset(np.array([X_catchment[n]]), d[key])\n else:\n # We have to change the 2D array to a 3D array (with just 1 ens member)\n X = np.array([X])\n # else, export full radar nowcast to netcdf\n stp.io.export_forecast_dataset(X, exporter)\n \n X = None\n\n# Conditional 
parameters\n## parameters that can be directly related to other parameters\ndef cond_pars(pars):\n for key in list(pars):\n if key == \"oflow_method\":\n if pars[key].lower() == \"darts\": pars[\"n_prvs_times\"] = 9\n else: pars[\"n_prvs_times\"] = 3\n elif key.lower() == \"n_cascade_levels\":\n if pars[key] == 1 : pars[\"bandpass_filter\"] = \"uniform\"\n else: pars[\"bandpass_filter\"] = \"gaussian\"\n elif key.lower() == \"nwc_method\":\n if pars[key] == \"extrapolation\" : pars[\"n_ens_members\"] = 1\n return pars\n\n#########\n# Make list of parameters (i.e. the different dates - all other parameters are\n# the same for every run) and scatter these over the nodes.\n#########\n \n# Prepare the list of all parameter sets of the verification\nparsets = [[]]\nfor _, items in experiment.items():\n parsets = [parset+[item] for parset in parsets for item in items]\n\nif rank == 0:\n #### Reorganize work a bit so we can scatter it\n keyfunc = lambda x:x[0] % size\n work = itertools.groupby(sorted(enumerate(parsets), key=keyfunc), keyfunc)\n \n #### Expand the work so we get lists of row, col per node\n workpernode = [[x[1] for x in val] for (key, val) in work]\nelse:\n workpernode = None\n\n#### NOW DISTRIBUTE THE WORK\nworkpernode = comm.scatter(workpernode, root=0)\n\nlogging.info(\"Got the following work in process rank {} : {}\".format(rank, workpernode))\n\n#### Each node can now do it's own work. The main advantage is that we can do a gather at the end to collect all results.\n#### Keep track of all the runs per node in scores\n#scores = []\n\n#### before starting any runs, make sure that you know in which folder we run this MPI run routine. \n#### Always return to this folder before the next run\n#curdir = os.getcwd()\nos.chdir('/u/imhof_rn/pysteps-master')\n\n###########\n# Run the model in parallel\n###########\n\n# Now loop all parameter sets\nfor n, parset in enumerate(workpernode):\n# logging.info(\"rank %02.f computing scores for parameter set nr %04.f\" % (rank, n))\n runId = '%s_%04.f' % (out_dir, n)\n \n # Build parameter set\n \n p = {}\n for m, key in enumerate(experiment.keys()):\n p[key] = parset[m]\n ## apply conditional parameters\n p = cond_pars(p)\n ## include all remaining parameters\n p.update(verification)\n p.update(forecast)\n \n# print(\"************************\")\n# print(\"* Parameter set %02d/%02d: *\" % (n+1, len(parsets)))\n# print(\"************************\")\n \n# pprint.pprint(p)\n \n # If necessary, build path to results\n path_to_experiment = os.path.join(cfg.path_outputs, p[\"experiment_name\"])\n # subdir with event date\n path_to_nwc = os.path.join(path_to_experiment, '-'.join([p[\"data\"][0], p[\"data\"][3]]))\n# for key, item in p.items():\n#\t\t# include only variables that change\n# if len(experiment.get(key,[None])) > 1 and key.lower() is not \"data\":\n# path_to_nwc = os.path.join(path_to_nwc, '-'.join([key, str(item)]))\n try:\n os.makedirs(path_to_nwc)\n except OSError:\n pass\n \n # **************************************************************************\n # NOWCASTING\n # ************************************************************************** \n \n # Loop forecasts within given event using the prescribed update cycle interval\n\n ## import data specifications\n ds = cfg.get_specifications(p[\"data\"][3])\n \n if p[\"v_accu\"] is None:\n p[\"v_accu\"] = ds.timestep\n \n # Loop forecasts for given event\n startdate = datetime.datetime.strptime(p[\"data\"][0], \"%Y%m%d%H%M\")\n enddate = datetime.datetime.strptime(p[\"data\"][1], 
\"%Y%m%d%H%M\")\n countnwc = 0\n while startdate <= enddate:\n try:\n \n # filename of the nowcast netcdf. Set name either per catchment or as \n # total nowcast for the entire radar image.\n if catchments == True:\n outfn = []\n for n in range(0, len(catchment_names)):\n path_to_catchment = os.path.join(path_to_nwc, catchment_names[n])\n try:\n os.makedirs(path_to_catchment)\n Name = os.path.join(path_to_catchment, \"%s_nowcast.netcdf\" % startdate.strftime(\"%Y%m%d%H%M\"))\n outfn.append(Name)\n except OSError:\n print(\"Catchment outfile directory does already exist for starttime: %s\" % startdate.strftime(\"%Y%m%d%H%M\"))\n Name = os.path.join(path_to_catchment, \"%s_nowcast.netcdf\" % startdate.strftime(\"%Y%m%d%H%M\"))\n outfn.append(Name)\n else:\n outfn = os.path.join(path_to_nwc, \"%s_nowcast.netcdf\" % startdate.strftime(\"%Y%m%d%H%M\"))\n \n ## check if results already exists\n if catchments == True:\n run_exist = False\n if os.path.isfile(outfn[n]):\n fid = netCDF4.Dataset(outfn[n], 'r')\n if fid.dimensions[\"time\"].size == p[\"n_lead_times\"]:\n run_exist = True\n if p[\"overwrite\"]:\n os.remove(outfn[n])\n run_exist = False \n else:\n os.remove(outfn[n])\n else:\n run_exist = False\n if os.path.isfile(outfn):\n fid = netCDF4.Dataset(outfn, 'r')\n if fid.dimensions[\"time\"].size == p[\"n_lead_times\"]:\n run_exist = True\n if p[\"overwrite\"]:\n os.remove(outfn)\n run_exist = False \n else:\n os.remove(outfn)\n \n if run_exist:\n print(\"Nowcast %s_nowcast already exists in %s\" % (startdate.strftime(\"%Y%m%d%H%M\"),path_to_nwc))\n \n else:\n countnwc += 1\n print(\"Computing the nowcast (%02d) ...\" % countnwc)\n \n print(\"Starttime: %s\" % startdate.strftime(\"%Y%m%d%H%M\"))\n \n ## redirect stdout to log file\n logfn = os.path.join(path_to_nwc, \"%s_log.txt\" % startdate.strftime(\"%Y%m%d%H%M\")) \n print(\"Log: %s\" % logfn)\n orig_stdout = sys.stdout\n f = open(logfn, 'w')\n sys.stdout = f\n \n print(\"*******************\")\n print(\"* %s *****\" % startdate.strftime(\"%Y%m%d%H%M\"))\n print(\"* Parameter set : *\")\n # pprint.pprint(p) \n print(\"*******************\")\n \n print(\"--- Start of the run : %s ---\" % (datetime.datetime.now()))\n \n ## time\n t0 = time.time()\n \n # Read inputs\n # print(\"Read the data...\")\n \n ## find radar field filenames\n input_files = stp.io.find_by_date(startdate, ds.root_path, ds.path_fmt, ds.fn_pattern,\n ds.fn_ext, ds.timestep, p[\"n_prvs_times\"])\n \n \n ## read radar field files\n importer = stp.io.get_method(ds.importer, type=\"importer\")\n R, _, metadata = stp.io.read_timeseries(input_files, importer, **ds.importer_kwargs)\n metadata0 = metadata.copy()\n metadata0[\"shape\"] = R.shape[1:]\n \n # Prepare input files\n # print(\"Prepare the data...\")\n \n ## if requested, make sure we work with a square domain\n reshaper = stp.utils.get_method(p[\"adjust_domain\"])\n R, metadata = reshaper(R, metadata)\n \n ## if necessary, convert to rain rates [mm/h] \n converter = stp.utils.get_method(\"mm/h\")\n R, metadata = converter(R, metadata)\n \n ## threshold the data\n R[R < p[\"r_threshold\"]] = 0.0\n metadata[\"threshold\"] = p[\"r_threshold\"]\n \n ## convert the data\n converter = stp.utils.get_method(p[\"unit\"])\n R, metadata = converter(R, metadata)\n \n ## transform the data\n transformer = stp.utils.get_method(p[\"transformation\"])\n R, metadata = transformer(R, metadata)\n \n ## set NaN equal to zero\n R[~np.isfinite(R)] = metadata[\"zerovalue\"]\n \n # Compute motion field\n oflow_method = 
stp.motion.get_method(p[\"oflow_method\"])\n UV = oflow_method(R)\n \n #####\n # Perform the nowcast \n #####\n \n ## initialize netcdf file\n incremental = \"timestep\" if p[\"nwc_method\"].lower() == \"steps\" else None\n if catchments == True:\n metadata_new = stp.utils.catchment_metadata_mpi(shapes, metadata0)\n d = {} \n for n in range(0, len(catchment_filenames)):\n d[\"exporter_{0}\".format(n)] = stp.io.initialize_forecast_exporter_netcdf(outfn[n], startdate,\n ds.timestep, p[\"n_lead_times\"], metadata_new[n][\"shape\"], \n p[\"n_ens_members\"], metadata_new[n], incremental=incremental)\n else:\n exporter = stp.io.initialize_forecast_exporter_netcdf(outfn, startdate,\n ds.timestep, p[\"n_lead_times\"], metadata0[\"shape\"], \n p[\"n_ens_members\"], metadata0, incremental=incremental)\n \n ## start the nowcast\n nwc_method = stp.nowcasts.get_method(p[\"nwc_method\"])\n R_fct = nwc_method(R[-1,:,:], UV, p[\"n_lead_times\"], extrap_method=p[\"adv_method\"])\n \n print(R_fct.shape[0])\n \n export(R_fct)\n \n ## save results, either per catchment or in total\n if catchments == True:\n for n in range(0, len(catchment_filenames)):\n key = list(d.keys())[n]\n stp.io.close_forecast_file(d[key])\n else:\n stp.io.close_forecast_file(exporter)\n R_fct = None\n \n # save log\n print(\"--- End of the run : %s ---\" % (datetime.datetime.now()))\n print(\"--- Total time : %s seconds ---\" % (time.time() - t0))\n sys.stdout = orig_stdout\n f.close()\n \n # next forecast\n startdate += datetime.timedelta(minutes = p[\"data\"][2])\n \n except ValueError:\n print('ValueError')\n # next forecast\n startdate += datetime.timedelta(minutes = p[\"data\"][2])\n\n# tr.print_diff()\n# scores.append(n)\n #### RETURN TO THE CORRECT DIRECTORY, JUST IN CASE SOMETHING WAS CHANGED...\n os.chdir('/u/imhof_rn/pysteps-master')\n\n#### Wait here so we can collect all runs\n#### Because we distributed the work evenly all processes should be here at approximately the same time\ncomm.Barrier()\n#### Great, we're all here. Now let's gather the scores...\n#### Collect values from all the processes in the main root\n#scores = comm.gather(scores, root=0)\n\n#logging.debug(\"Rank {} has scores {}\".format(rank, scores))\n \nend_time = time.time()\n\nprint('Total process took', (end_time - start_time)/3600.0, 'hours') ",
"\"\"\"Implementation of the semi-Lagrangian method of Germann et al (2002).\"\"\"\n\nimport numpy as np\nimport scipy.ndimage.interpolation as ip\nimport time\n\ndef extrapolate(R, V, num_timesteps, outval=np.nan, **kwargs):\n \"\"\"Apply semi-Lagrangian extrapolation to a two-dimensional precipitation\n field.\n\n Parameters\n ----------\n R : array-like\n Array of shape (m,n) containing the input precipitation field. All\n values are required to be finite.\n V : array-like\n Array of shape (2,m,n) containing the x- and y-components of the m*n\n advection field. All values are required to be finite.\n num_timesteps : int\n Number of time steps to extrapolate.\n outval : float\n Optional argument for specifying the value for pixels advected from\n outside the domain. If outval is set to 'min', the value is taken as\n the minimum value of R.\n Default : np.nan\n\n Other Parameters\n ----------------\n D_prev : array-like\n Optional initial displacement vector field of shape (2,m,n) for the\n extrapolation.\n Default : None\n n_iter : int\n Number of inner iterations in the semi-Lagrangian scheme.\n Default : 3\n inverse : bool\n If True, the extrapolation trajectory is computed backward along the\n flow (default), forward otherwise.\n Default : True\n return_displacement : bool\n If True, return the total advection velocity (displacement) between the\n initial input field and the advected one integrated along the trajectory.\n Default : False\n\n Returns\n -------\n out : array or tuple\n If return_displacement=False, return a time series extrapolated fields of\n shape (num_timesteps,m,n). Otherwise, return a tuple containing the\n extrapolated fields and the total displacement along the advection trajectory.\n\n References\n ----------\n :cite:`GZ2002`\n\n \"\"\"\n if len(R.shape) != 2:\n raise ValueError(\"R must be a two-dimensional array\")\n\n if len(V.shape) != 3:\n raise ValueError(\"V must be a three-dimensional array\")\n\n if np.any(~np.isfinite(R)):\n raise ValueError(\"R contains non-finite values\")\n\n if np.any(~np.isfinite(V)):\n raise ValueError(\"V contains non-finite values\")\n\n # defaults\n verbose = kwargs.get(\"verbose\", False)\n D_prev = kwargs.get(\"D_prev\", None)\n n_iter = kwargs.get(\"n_iter\", 3)\n inverse = kwargs.get(\"inverse\", True)\n return_displacement = kwargs.get(\"return_displacement\", False)\n\n if verbose:\n print(\"Computing the advection with the semi-lagrangian scheme.\")\n t0 = time.time()\n\n if outval == \"min\":\n outval = np.nanmin(R)\n\n coeff = 1.0 if not inverse else -1.0\n\n X,Y = np.meshgrid(np.arange(V.shape[2]), np.arange(V.shape[1]))\n XY = np.stack([X, Y])\n\n R_e = []\n if D_prev is None:\n D = np.zeros((2, V.shape[1], V.shape[2]))\n else:\n D = D_prev.copy()\n\n for t in range(num_timesteps):\n V_inc = np.zeros(D.shape)\n\n for k in range(n_iter):\n if t > 0 or k > 0 or D_prev is not None:\n XYW = XY + D - V_inc / 2.0\n XYW = [XYW[1, :, :], XYW[0, :, :]]\n\n VWX = ip.map_coordinates(V[0, :, :], XYW, mode=\"nearest\", order=0,\n prefilter=False)\n VWY = ip.map_coordinates(V[1, :, :], XYW, mode=\"nearest\", order=0,\n prefilter=False)\n else:\n VWX = V[0, :, :]\n VWY = V[1, :, :]\n\n V_inc[0, :, :] = VWX / n_iter\n V_inc[1, :, :] = VWY / n_iter\n\n D += coeff * V_inc\n\n XYW = XY + D\n XYW = [XYW[1, :, :], XYW[0, :, :]]\n\n IW = ip.map_coordinates(R, XYW, mode=\"constant\", cval=outval, order=0,\n prefilter=False)\n R_e.append(np.reshape(IW, R.shape))\n\n if verbose:\n print(\"--- %s seconds ---\" % (time.time() - t0))\n\n if 
not return_displacement:\n return np.stack(R_e)\n else:\n return np.stack(R_e), D\n",
"# -*- coding: utf-8 -*-\n#\n# Licensed under the BSD-3-Clause license\n# Copyright (c) 2018, Andres A. Perez Hortal\n\"\"\"\nVariational Echo Tracking (VET) Module\n\n\nThis module implements the VET algorithm presented\nby `Laroche and Zawadzki (1995)`_ and used in the\nMcGill Algorithm for Prediction by Lagrangian Extrapolation (MAPLE) described\nin `Germann and Zawadzki (2002)`_.\n\n\n.. _`Laroche and Zawadzki (1995)`:\\\n http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n\n.. _`Germann and Zawadzki (2002)`:\\\n http://dx.doi.org/10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2\n\nThe morphing and the cost functions are implemented in Cython and parallelized\nfor performance.\n\"\"\"\n\nimport numpy\nfrom numpy.ma.core import MaskedArray\nfrom scipy.ndimage.interpolation import zoom\nfrom scipy.optimize import minimize\n\nfrom pysteps.motion._vet import _warp, _cost_function\n\n\ndef round_int(scalar):\n \"\"\"\n Round number to nearest integer. Returns and integer value.\n \"\"\"\n return int(numpy.round(scalar))\n\n\ndef ceil_int(scalar):\n \"\"\"\n Round number to nearest integer. Returns and integer value.\n \"\"\"\n return int(numpy.ceil(scalar))\n\n\ndef get_padding(dimension_size, sectors):\n \"\"\"\n Get the padding at each side of the one dimensions of the image\n so the new image dimensions are divided evenly in the\n number of *sectors* specified.\n\n Parameters\n ----------\n\n dimension_size : int\n Actual dimension size.\n\n sectors : int\n number of sectors over which the the image will be divided.\n\n Return\n ------\n\n pad_before , pad_after: int, int\n Padding at each side of the image for the corresponding dimension.\n \"\"\"\n reminder = dimension_size % sectors\n\n if reminder != 0:\n pad = sectors - reminder\n pad_before = pad // 2\n if pad % 2 == 0:\n pad_after = pad_before\n else:\n pad_after = pad_before + 1\n\n return pad_before, pad_after\n\n return 0, 0\n\n\ndef morph(image, displacement, gradient=False):\n \"\"\"\n Morph image by applying a displacement field (Warping).\n\n The new image is created by selecting for each position the values of the\n input image at the positions given by the x and y displacements.\n The routine works in a backward sense.\n The displacement vectors have to refer to their destination.\n\n For more information in Morphing functions see Section 3 in\n `Beezley and Mandel (2008)`_.\n\n Beezley, J. D., & Mandel, J. (2008).\n Morphing ensemble Kalman filters. Tellus A, 60(1), 131-140.\n\n .. _`Beezley and Mandel (2008)`: http://dx.doi.org/10.1111/\\\n j.1600-0870.2007.00275.x\n\n\n The displacement field in x and y directions and the image must have the\n same dimensions.\n\n The morphing is executed in parallel over x axis.\n\n The value of displaced pixels that fall outside the limits takes the\n value of the nearest edge. Those pixels are indicated by values greater\n than 1 in the output mask.\n\n Parameters\n ----------\n\n image : ndarray (ndim = 2)\n Image to morph\n\n displacement : ndarray (ndim = 3)\n Displacement field to be applied (Warping). The first dimension\n corresponds to the coordinate to displace.\n\n The dimensions are:\n displacement [ i/x (0) or j/y (1) ,\n i index of pixel, j index of pixel ]\n\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n\n Returns\n -------\n\n image : ndarray (float64 ,ndim = 2)\n Morphed image.\n\n mask : ndarray (int8 ,ndim = 2)\n Invalid values mask. 
Points outside the boundaries are masked.\n Values greater than 1, indicate masked values.\n\n gradient_values : ndarray (float64 ,ndim = 3), optional\n If gradient keyword is True, the gradient of the function is also\n returned.\n\n \"\"\"\n\n if not isinstance(image, MaskedArray):\n _mask = numpy.zeros_like(image, dtype='int8')\n else:\n _mask = numpy.asarray(numpy.ma.getmaskarray(image),\n dtype='int8')\n\n _image = numpy.asarray(image, dtype='float64', order='C')\n _displacement = numpy.asarray(displacement, dtype='float64', order='C')\n\n return _warp(_image, _mask, _displacement, gradient=gradient)\n\n\ndef vet_cost_function_gradient(*args, **kwargs):\n kwargs[\"gradient\"] = True\n return vet_cost_function(*args, **kwargs)\n\n\ndef vet_cost_function(sector_displacement_1d,\n input_images,\n blocks_shape,\n mask,\n smooth_gain,\n debug=False,\n gradient=False):\n \"\"\"\n Variational Echo Tracking Cost Function.\n\n .. _`scipy.optimize.minimize` :\\\n https://docs.scipy.org/doc/scipy-0.18.1/reference/\\\n generated/scipy.optimize.minimize.html\n\n This function is designed to be used with the `scipy.optimize.minimize`_\n\n The function first argument is the variable to be used in the\n minimization procedure.\n\n The sector displacement must be a flat array compatible with the\n dimensions of the input image and sectors shape (see parameters section\n below for more details).\n\n .. _ndarray:\\\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html\n\n\n Parameters\n ----------\n\n sector_displacement_1d : ndarray_\n Array of displacements to apply to each sector. The dimensions are:\n sector_displacement_2d\n [ x (0) or y (1) displacement, i index of sector, j index of sector ].\n The shape of the sector displacements must be compatible with the\n input image and the block shape.\n The shape should be (2, mx, my) where mx and my are the numbers of\n sectors in the x and the y dimension.\n\n input_images : ndarray_\n Input images, sequence of 2D arrays, or 3D arrays.\n The first dimension represents the images time dimension.\n\n The template_image (first element in first dimensions) denotes the\n reference image used to obtain the displacement (2D array).\n The second is the target image.\n\n The expected dimensions are (2,nx,ny).\n Be aware the the 2D images dimensions correspond to (lon,lat) or (x,y).\n\n blocks_shape : ndarray_ (ndim=2)\n Number of sectors in each dimension (x and y).\n blocks_shape.shape = (mx,my)\n\n mask : ndarray_ (ndim=2)\n Data mask. 
If is True, the data is marked as not valid and is not\n used in the computations.\n\n smooth_gain : float\n Smoothness constrain gain\n\n debug : bool, optional\n If True, print debugging information.\n\n gradient : bool, optional\n If True, the gradient of the morphing function is returned.\n\n Returns\n -------\n\n penalty or gradient values.\n\n penalty : float\n Value of the cost function\n\n gradient_values : ndarray (float64 ,ndim = 3), optional\n If gradient keyword is True, the gradient of the function is also\n returned.\n\n \"\"\"\n\n sector_displacement_2d = \\\n sector_displacement_1d.reshape(*((2,) + tuple(blocks_shape)))\n\n if input_images.shape[0] == 3:\n three_times = True\n previous_image = input_images[0]\n center_image = input_images[1]\n next_image = input_images[2]\n\n else:\n previous_image = None\n center_image = input_images[0]\n next_image = input_images[1]\n three_times = False\n\n if gradient:\n gradient_values = _cost_function(sector_displacement_2d,\n center_image,\n next_image,\n mask,\n smooth_gain,\n gradient=True)\n if three_times:\n gradient_values += _cost_function(sector_displacement_2d,\n previous_image,\n center_image,\n mask,\n smooth_gain,\n gradient=True)\n\n return gradient_values.ravel()\n\n else:\n residuals, smoothness_penalty = _cost_function(sector_displacement_2d,\n center_image,\n next_image,\n mask,\n smooth_gain,\n gradient=False)\n\n if three_times:\n _residuals, _smoothness = _cost_function(sector_displacement_2d,\n previous_image,\n center_image,\n mask,\n smooth_gain,\n gradient=False)\n\n residuals += _residuals\n smoothness_penalty += _smoothness\n\n if debug:\n print(\"\\nresiduals\", residuals)\n print(\"smoothness_penalty\", smoothness_penalty)\n\n return residuals + smoothness_penalty\n\n\ndef vet(input_images,\n sectors=((32, 16, 4, 2), (32, 16, 4, 2)),\n smooth_gain=1e6,\n first_guess=None,\n intermediate_steps=False,\n verbose=True,\n indexing='yx',\n padding=0,\n options=None):\n \"\"\"\n Variational Echo Tracking Algorithm presented in\n `Laroche and Zawadzki (1995)`_ and used in the McGill Algorithm for\n Prediction by Lagrangian Extrapolation (MAPLE) described in\n `Germann and Zawadzki (2002)`_.\n\n .. _`Laroche and Zawadzki (1995)`:\\\n http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n\n .. _`Germann and Zawadzki (2002)`:\\\n http://dx.doi.org/10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2\n\n This algorithm computes the displacement field between two images\n ( the input_image with respect to the template image).\n The displacement is sought by minimizing sum of the residuals of the\n squared differences of the images pixels and the contribution of a\n smoothness constrain.\n\n In order to find the minimum an scaling guess procedure is applied,\n from larger scales\n to a finer scale. This reduces the changes that the minimization procedure\n converges to a local minimum. The scaling guess is defined by the scaling\n sectors (see **sectors** keyword).\n\n The smoothness of the returned displacement field is controlled by the\n smoothness constrain gain (**smooth_gain** keyword).\n\n If a first guess is not given, zero displacements are used as first guess.\n\n To minimize the cost function, the `scipy minimization`_ function is used\n with the 'CG' method. 
This method proved to give the best results under\n many different conditions and is the most similar one to the original VET\n implementation in `Laroche and Zawadzki (1995)`_.\n\n\n The method CG uses a nonlinear conjugate gradient algorithm by Polak and\n Ribiere, a variant of the Fletcher-Reeves method described in\n Nocedal and Wright (2006), pp. 120-122.\n\n\n .. _MaskedArray: https://docs.scipy.org/doc/numpy/reference/\\\n maskedarray.baseclass.html#numpy.ma.MaskedArray\n\n .. _ndarray:\\\n https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html\n\n Parameters\n ----------\n\n input_images : ndarray_ or MaskedArray\n Input images, sequence of 2D arrays, or 3D arrays.\n The first dimension represents the images time dimension.\n\n The template_image (first element along the first dimension) denotes\n the reference image used to obtain the displacement (2D array).\n The second is the target image.\n\n The expected dimensions are (2,ni,nj).\n\n sectors : list or array, optional\n The number of sectors for each dimension used in the scaling procedure.\n If the array is 1D, the same sectors are used for both image dimensions\n (x and y). If it is 2D, each row determines the sectors of the\n corresponding dimension.\n\n smooth_gain : float, optional\n Smoothness constraint gain factor\n\n first_guess : ndarray_, optional\n The first guess should have the same shape as the initial\n sector shape used in the scaling procedure.\n If first_guess is not given, zeros are used as first guess.\n\n E.g.:\n If the first sector shape in the scaling procedure is (ni,nj), then\n the first_guess should have (2, ni, nj) shape.\n\n intermediate_steps : bool, optional\n If True, also return a list with the first guesses obtained during the\n scaling procedure. False, by default.\n\n verbose : bool, optional\n Verbosity enabled if True (default).\n\n indexing : str, optional\n Input indexing order. 'ij' and 'xy' indicate that the\n dimensions of the input are (time, longitude, latitude), while\n 'yx' indicates (time, latitude, longitude).\n The displacement field dimensions are ordered accordingly in a way that\n the first dimension indicates the displacement along x (0) or y (1).\n That is, UV displacements are always returned.\n\n padding : int\n Padding width in grid points. A border is added to the input array\n to reduce the effects of the minimization at the border.\n\n options : dict, optional\n A dictionary of solver options.\n See `scipy minimization`_ function for more details.\n\n .. _`scipy minimization` : https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html\n\n Returns\n -------\n\n displacement_field : ndarray_\n Displacement Field (2D array representing the transformation) that\n warps the template image into the input image.\n The dimensions are (2,ni,nj), where the first\n dimension indicates the displacement along x (0) or y (1).\n\n intermediate_steps : list of ndarray_\n List with the first guesses obtained during the scaling procedure.\n\n References\n ----------\n\n Laroche, S., and I. Zawadzki, 1995:\n Retrievals of horizontal winds from single-Doppler clear-air data by\n methods of cross-correlation and variational analysis.\n J. Atmos. Oceanic Technol., 12, 721–738.\n doi: http://dx.doi.org/10.1175/1520-0426(1995)012<0721:ROHWFS>2.0.CO;2\n\n Germann, U. and I. Zawadzki, 2002:\n Scale-Dependence of the Predictability of Precipitation from Continental\n Radar Images. Part I: Description of the Methodology.\n Mon. Wea. Rev., 130, 2859–2873,\n doi: 10.1175/1520-0493(2002)130<2859:SDOTPO>2.0.CO;2.\n\n Nocedal, J, and S J Wright. 2006. Numerical Optimization. Springer New York.\n\n \"\"\"\n\n if verbose:\n def debug_print(*args, **kwargs):\n print(*args, **kwargs)\n else:\n def debug_print(*args, **kwargs):\n del args\n del kwargs\n\n if options is None:\n options = dict()\n else:\n options = dict(options)\n\n options.setdefault('eps', 0.1)\n options.setdefault('gtol', 0.1)\n options.setdefault('maxiter', 100)\n options.setdefault('disp', False)\n\n # Set to None to suppress pylint warning.\n pad_i = None\n pad_j = None\n sectors_in_i = None\n sectors_in_j = None\n\n debug_print(\"Running VET algorithm\")\n\n if (input_images.ndim != 3) or not (2 <= input_images.shape[0] <= 3):\n raise ValueError(\"input_images dimension mismatch.\\n\" +\n \"input_images.shape: \" + str(input_images.shape) +\n \"\\n(2, x, y) or (3, x, y) dimensions expected\")\n\n valid_indexing = ['yx', 'xy', 'ij']\n\n if indexing not in valid_indexing:\n raise ValueError(\"Invalid indexing value: {0}\\n\".format(indexing)\n + \"Supported values: {0}\".format(str(valid_indexing)))\n\n # Get mask\n if isinstance(input_images, MaskedArray):\n mask = numpy.ma.getmaskarray(input_images)\n else:\n # Mask invalid data\n if padding > 0:\n padding_tuple = ((0, 0), (padding, padding), (padding, padding))\n\n input_images = numpy.pad(input_images,\n padding_tuple,\n 'constant',\n constant_values=numpy.nan)\n\n input_images = numpy.ma.masked_invalid(input_images)\n mask = numpy.ma.getmaskarray(input_images)\n\n input_images[mask] = 0 # Remove any NaN from the raw data\n\n # Create a 2D mask with the right data type for _vet\n mask = numpy.asarray(numpy.any(mask, axis=0), dtype='int8')\n\n input_images = numpy.asarray(input_images.data, dtype='float64')\n\n # Check that the sectors divide the domain\n sectors = numpy.asarray(sectors, dtype=\"int\")\n\n if sectors.ndim == 1:\n\n new_sectors = (numpy.zeros((2,) + sectors.shape, dtype='int')\n + sectors.reshape((1, sectors.shape[0]))\n )\n sectors = new_sectors\n elif sectors.ndim > 2 or sectors.ndim < 1:\n raise ValueError(\"Incorrect sectors dimensions.\\n\"\n + \"Only 1D or 2D arrays are supported to define \"\n + \"the number of sectors used in \"\n + \"the scaling procedure\")\n\n # Sort sectors in ascending order (coarse to fine scales)\n sectors[0, :].sort()\n sectors[1, :].sort()\n\n # Prepare first guess\n first_guess_shape = (2, int(sectors[0, 0]), int(sectors[1, 0]))\n\n if first_guess is None:\n first_guess = numpy.zeros(first_guess_shape, order='C')\n else:\n if first_guess.shape != first_guess_shape:\n raise ValueError(\n \"The shape of the initial guess does not match the number of \"\n + \"sectors of the first scaling guess\\n\"\n + \"first_guess.shape={}\\n\".format(str(first_guess.shape))\n + \"Expected shape={}\".format(str(first_guess_shape)))\n else:\n first_guess = numpy.asarray(first_guess, order='C', dtype='float64')\n\n scaling_guesses = list()\n\n previous_sectors_in_i = sectors[0, 0]\n previous_sectors_in_j = sectors[1, 0]\n\n for n, (sectors_in_i, sectors_in_j) in enumerate(zip(sectors[0, :],\n sectors[1, :])):\n\n # Minimize for each sector size\n pad_i = get_padding(input_images.shape[1], sectors_in_i)\n pad_j = get_padding(input_images.shape[2], sectors_in_j)\n\n if (pad_i != (0, 0)) or (pad_j != (0, 0)):\n\n _input_images = numpy.pad(input_images, ((0, 0), pad_i, pad_j), 'edge')\n\n _mask = numpy.pad(mask, (pad_i, pad_j),\n 'constant',\n constant_values=1)\n\n if first_guess is None:\n first_guess = 
numpy.pad(first_guess, ((0, 0), pad_i, pad_j), 'edge')\n else:\n _input_images = input_images\n _mask = mask\n\n sector_shape = (_input_images.shape[1] // sectors_in_i,\n _input_images.shape[2] // sectors_in_j)\n\n debug_print(\"original image shape: \" + str(_input_images.shape))\n debug_print(\"padded image shape: \" + str(_input_images.shape))\n debug_print(\"padded template_image image shape: \"\n + str(_input_images.shape))\n\n debug_print(\"\\nNumber of sectors: {0:d},{1:d}\".format(sectors_in_i,\n sectors_in_j))\n\n debug_print(\"Sector Shape:\", sector_shape)\n\n if n > 0:\n first_guess = zoom(first_guess,\n (1,\n sectors_in_i / previous_sectors_in_i,\n sectors_in_j / previous_sectors_in_j),\n order=1, mode='nearest')\n\n debug_print(\"Minimizing\")\n\n result = minimize(vet_cost_function,\n first_guess.flatten(),\n jac=vet_cost_function_gradient,\n args=(_input_images,\n (sectors_in_i, sectors_in_j),\n _mask,\n smooth_gain),\n method='CG',\n options=options)\n\n first_guess = result.x.reshape(*first_guess.shape)\n\n if verbose:\n vet_cost_function(result.x,\n _input_images,\n (sectors_in_i, sectors_in_j),\n _mask,\n smooth_gain,\n debug=True)\n if indexing == 'yx':\n scaling_guesses.append(first_guess[::-1, ...])\n else:\n scaling_guesses.append(first_guess)\n\n previous_sectors_in_i = sectors_in_i\n previous_sectors_in_j = sectors_in_j\n\n first_guess = zoom(first_guess,\n (1,\n _input_images.shape[1] / sectors_in_i,\n _input_images.shape[2] / sectors_in_j),\n order=1, mode='nearest')\n\n # Remove the extra padding if any\n ni = _input_images.shape[1]\n nj = _input_images.shape[2]\n\n first_guess = first_guess[:, pad_i[0]:ni - pad_i[1], pad_j[0]:nj - pad_j[1]]\n\n if indexing == 'yx':\n first_guess = first_guess[::-1, ...]\n\n if padding > 0:\n first_guess = first_guess[:, padding:-padding, padding:-padding]\n\n if intermediate_steps:\n return first_guess, scaling_guesses\n\n return first_guess\n",
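Written out, the quantity that vet_cost_function minimizes is the following; this is a schematic restatement of the docstring above, not pysteps' exact discretization (the discrete smoothness operator S lives inside the compiled _cost_function and is not reproduced here):

\[
J(\mathbf{u}) = \sum_{(i,j)\ \text{unmasked}} \Big( I_{\mathrm{target}}(i,j) - \mathcal{W}\big[I_{\mathrm{template}}, \mathbf{u}\big](i,j) \Big)^{2} + \gamma\, S(\mathbf{u})
\]

where \(\mathbf{u}\) is the (2, mx, my) array of sector displacements, \(\mathcal{W}\) is the warp applied by _warp, and \(\gamma\) is smooth_gain. When three input images are passed, the same penalty is evaluated for both consecutive image pairs and summed, exactly as the three_times branch does.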
"\"\"\"Miscellaneous utility functions related to generation of stochastic perturbations.\"\"\"\n\nimport numpy as np\ntry:\n import dask\n dask_imported = True\nexcept ImportError:\n dask_imported = False\n# Use the pyfftw interface if it is installed. If not, fall back to the fftpack\n# interface provided by SciPy, and finally to numpy if SciPy is not installed.\ntry:\n import pyfftw.interfaces.numpy_fft as fft\n import pyfftw\n # TODO: Caching and multithreading currently disabled because they give a\n # segfault with dask.\n #pyfftw.interfaces.cache.enable()\n fft_kwargs = {\"threads\":1, \"planner_effort\":\"FFTW_ESTIMATE\"}\nexcept ImportError:\n import scipy.fftpack as fft\n fft_kwargs = {}\nexcept ImportError:\n import numpy.fft as fft\n fft_kwargs = {}\n\ndef compute_noise_stddev_adjs(R, R_thr_1, R_thr_2, F, decomp_method, num_iter,\n conditional=True, num_workers=None):\n \"\"\"Apply a scale-dependent adjustment factor to the noise fields used in STEPS.\n\n Simulates the effect of applying a precipitation mask to a Gaussian noise\n field obtained by the nonparametric filter method. The idea is to decompose\n the masked noise field into a cascade and compare the standard deviations\n of each level into those of the observed precipitation intensity field.\n This gives correction factors for the standard deviations :cite:`BPS2006`.\n The calculations are done for n realizations of the noise field, and the\n correction factors are calculated from the average values of the standard\n deviations.\n\n Parameters\n ----------\n R : array_like\n The input precipitation field, assumed to be in logarithmic units\n (dBR or reflectivity).\n R_thr_1 : float\n Intensity threshold for precipitation/no precipitation.\n R_thr_2 : float\n Intensity values below R_thr_1 are set to this value.\n F : dict\n A bandpass filter dictionary returned by a method defined in\n pysteps.cascade.bandpass_filters. This defines the filter to use and\n the number of cascade levels.\n decomp_method : function\n A function defined in pysteps.cascade.decomposition. Specifies the\n method to use for decomposing the observed precipitation field and\n noise field into different spatial scales.\n num_iter : int\n The number of noise fields to generate.\n conditional : bool\n If set to True, compute the statistics conditionally by excluding areas\n of no precipitation.\n num_workers : int\n The number of workers to use for parallel computation. Set to None to\n use all available CPUs. 
Applicable if dask is enabled.\n\n Returns\n -------\n out : list\n A list containing the standard deviation adjustment factor for each\n cascade level.\n\n \"\"\"\n\n MASK = R >= R_thr_1\n\n R = R.copy()\n R[~np.isfinite(R)] = R_thr_2\n R[~MASK] = R_thr_2\n if not conditional:\n mu,sigma = np.mean(R),np.std(R)\n else:\n mu,sigma = np.mean(R[MASK]),np.std(R[MASK])\n R -= mu\n\n MASK_ = MASK if conditional else None\n decomp_R = decomp_method(R, F, MASK=MASK_)\n\n if not dask_imported:\n N_stds = []\n else:\n res = []\n\n randstates = []\n seed = None\n for k in range(num_iter):\n randstates.append(np.random.RandomState(seed=seed))\n seed = np.random.randint(0, high=1e9)\n\n R_fft = abs(fft.fft2(R))\n\n for k in range(num_iter):\n def worker():\n # generate Gaussian white noise field, multiply it with the standard\n # deviation of the observed field and apply the precipitation mask\n N = randstates[k].randn(R.shape[0], R.shape[1])\n N = np.real(fft.ifft2(fft.fft2(N) * R_fft))\n N = N / np.std(N) * sigma + mu\n N[~MASK] = R_thr_2\n\n # subtract the mean and decompose the masked noise field into a\n # cascade\n N -= mu\n decomp_N = decomp_method(N, F, MASK=MASK_)\n\n return decomp_N[\"stds\"]\n\n if dask_imported:\n res.append(dask.delayed(worker)())\n else:\n N_stds.append(worker())\n\n if dask_imported:\n N_stds = dask.compute(*res, num_workers=num_workers)\n\n # for each cascade level, compare the standard deviations between the\n # observed field and the masked noise field, which gives the correction\n # factors\n return decomp_R[\"stds\"] / np.mean(np.vstack(N_stds), axis=0)\n",
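The correction-factor logic above is easier to see stripped down. The sketch below is a minimal stand-in, assuming a simple Gaussian low-pass/high-pass split in place of the bandpass cascade from pysteps.cascade.decomposition (toy_decomp is hypothetical, not a pysteps API, and the conditional-statistics handling is omitted); it keeps the same structure: correlated noise from the amplitude spectrum of R, rescaled to the observed mean and standard deviation, masked, then a per-level ratio of standard deviations.

import numpy as np
from scipy.ndimage import gaussian_filter

def toy_decomp(field):
    # stand-in "cascade": one smooth level and one detail level
    low = gaussian_filter(field, sigma=4.0)
    return np.stack([low, field - low])

rng = np.random.default_rng(42)
R = rng.gamma(2.0, 1.0, size=(128, 128))      # toy "precipitation" field
MASK = R >= 1.0                               # rain / no-rain mask (R_thr_1 analogue)
R = np.where(MASK, R, 0.5)                    # R_thr_2-style fill value
mu, sigma = R[MASK].mean(), R[MASK].std()

R_fft = np.abs(np.fft.fft2(R - mu))           # amplitude spectrum of the observation
obs_stds = toy_decomp(R - mu).std(axis=(1, 2))

noise_stds = []
for _ in range(20):                           # num_iter analogue
    N = np.real(np.fft.ifft2(np.fft.fft2(rng.standard_normal(R.shape)) * R_fft))
    N = N / N.std() * sigma + mu              # impose observed mean/std
    N = np.where(MASK, N, 0.5) - mu           # apply the precipitation mask
    noise_stds.append(toy_decomp(N).std(axis=(1, 2)))

adjustment = obs_stds / np.mean(noise_stds, axis=0)
print(adjustment)                             # one correction factor per cascade level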
"\"\"\"Forecast evaluation and skill scores for deterministic categorial forecasts.\"\"\"\n\nimport numpy as np\n\ndef det_cat_fcst(pred, obs, thr, scores):\n \"\"\"Calculate simple and skill scores for deterministic categorical forecasts.\n\n Parameters\n ----------\n pred : array_like\n predictions\n obs : array_like\n verifying observations\n scores : list\n a list containing the names of the scores to be computed, the full list\n is:\n\n +------------+--------------------------------------------------------+\n | Name | Description |\n +============+========================================================+\n | ACC | accuracy (proportion correct) |\n +------------+--------------------------------------------------------+\n | BIAS | frequency bias |\n +------------+--------------------------------------------------------+\n | CSI | critical success index (threat score) |\n +------------+--------------------------------------------------------+\n | FA | false alarm rate (prob. of false detection) |\n +------------+--------------------------------------------------------+\n | FAR | false alarm ratio |\n +------------+--------------------------------------------------------+\n | GSS | Gilbert skill score (equitable threat score) |\n +------------+--------------------------------------------------------+\n | HK | Hanssen-Kuipers discriminant (Pierce skill score) |\n +------------+--------------------------------------------------------+\n | HSS | Heidke skill score |\n +------------+--------------------------------------------------------+\n | POD | probability of detection (hit rate) |\n +------------+--------------------------------------------------------+\n | SEDI | symmetric extremal dependency index |\n +------------+--------------------------------------------------------+\n\n Returns\n -------\n result : list\n the verification results\n\n \"\"\"\n\n # flatten array if 2D\n pred = pred.flatten()\n obs = obs.flatten()\n\n # apply threshold\n predb = pred > thr\n obsb = obs > thr\n\n # calculate hits, misses, false positives, correct rejects\n H_idx = np.logical_and(predb==1, obsb==1) # correctly predicted precip\n F_idx = np.logical_and(predb==1, obsb==0) # predicted precip even though none there\n M_idx = np.logical_and(predb==0, obsb==1) # predicted no precip even though there was\n R_idx = np.logical_and(predb==0, obsb==0) # correctly predicted no precip\n\n H = sum(H_idx.astype(int)) # hits\n M = sum(M_idx.astype(int)) # misses\n F = sum(F_idx.astype(int)) # false alarms\n R = sum(R_idx.astype(int)) # correct rejections\n\n result = []\n for score in scores:\n\n score = score.lower()\n\n # simple scores\n POD = H/float(H+M) # probability of detection\n FAR = F/float(H+F) # false alarm ratio\n FA = F/float(F+R) # false alarm rate = prob of false detection\n s = (H+M)/float(H+M+F+R) # base rate = freq of observed events\n\n if score == 'pod':\n result.append(POD)\n if score == 'far':\n result.append(FAR)\n if score == 'fa':\n result.append(FA)\n if score == 'acc':\n ACC = (H+R)/(H+M+F+R) # accuracy (fraction correct)\n result.append(ACC)\n if score == 'csi':\n CSI = H/(H+M+F) # critical success index\n result.append(CSI)\n if score == 'bias': # frequency bias\n B = (H + F) / (H + M)\n result.append(B)\n\n # skill scores\n if score == 'hss':\n HSS = 2*(H*R-F*M)/((H+M)*(M+R)+(H+F)*(F+R)) # Heidke Skill Score (-1 < HSS < 1) < 0 implies no skill\n result.append(HSS)\n if score == 'hk':\n HK = POD-FA # Hanssen-Kuipers Discriminant\n result.append(HK)\n if score == 'gss':\n GSS = 
(POD-FA)/((1-s*POD)/(1-s)+FA*(1-s)/s) # Gilbert Skill Score\n result.append(GSS)\n if score == 'sedi':\n # Symmetric extremal dependence index\n SEDI = (np.log(FA)-np.log(POD)+np.log(1-POD)-np.log(1-FA))/(np.log(FA)\n +np.log(POD)+np.log(1-POD)+np.log(1-FA))\n result.append(SEDI)\n\n return result\n"
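As a quick sanity check of the contingency-table arithmetic above (assuming det_cat_fcst is in scope as defined): with four pixels and thr=0.5 the table works out to H=1, F=1, M=1, R=1, so POD = 1/2, FAR = 1/2 and CSI = 1/3 can be verified by hand.

import numpy as np

pred = np.array([0.8, 0.9, 0.2, 0.1])   # forecast values
obs = np.array([1.0, 0.0, 0.7, 0.0])    # observed values

print(det_cat_fcst(pred, obs, thr=0.5, scores=['pod', 'far', 'csi']))
# [0.5, 0.5, 0.3333...]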
] | [
[
"numpy.array",
"numpy.isfinite"
],
[
"numpy.isfinite",
"numpy.reshape",
"numpy.arange",
"numpy.nanmin",
"numpy.stack",
"scipy.ndimage.interpolation.map_coordinates",
"numpy.zeros"
],
[
"numpy.pad",
"numpy.ma.getmaskarray",
"numpy.asarray",
"numpy.round",
"numpy.ceil",
"numpy.ma.masked_invalid",
"numpy.zeros_like",
"numpy.any",
"scipy.ndimage.interpolation.zoom",
"numpy.zeros"
],
[
"numpy.fft.fft2",
"numpy.isfinite",
"numpy.std",
"numpy.mean",
"numpy.random.RandomState",
"numpy.vstack",
"numpy.random.randint"
],
[
"numpy.log",
"numpy.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xiaohanhuang/pytorch | [
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e",
"a31aea8eaa99a5ff72b5d002c206cd68d5467a5e"
] | [
"test/ao/sparsity/test_pruner.py",
"test/fx2trt/converters/acc_op/test_getitem.py",
"test/test_tensorexpr.py",
"torch/ao/sparsity/sparsifier/base_sparsifier.py",
"test/test_bundled_inputs.py",
"torch/ao/quantization/_quantize_dbr.py",
"test/test_type_hints.py",
"torch/onnx/symbolic_opset11.py",
"test/distributed/rpc/test_faulty_agent.py",
"torch/distributed/_sharded_tensor/shard.py",
"torch/ao/quantization/_dbr/auto_trace.py",
"torch/_vmap_internals.py",
"torch/fx/experimental/fx2trt/example/quantized_resnet_test.py",
"test/bottleneck_test/test_cuda.py"
] | [
"# -*- coding: utf-8 -*-\n# Owner(s): [\"module: unknown\"]\n\n\nimport copy\nimport logging\n\nimport torch\nfrom torch import nn\nfrom torch.ao.sparsity import BasePruner, PruningParametrization, ZeroesParametrization\nfrom torch.nn.utils import parametrize\n\nfrom torch.testing._internal.common_utils import TestCase\n\nlogging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\n\nDEVICES = {\n torch.device(\"cpu\"),\n torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n}\n\nNEEDS_ZEROS = { # these layers should have pruned indices zero-ed, not removed\n nn.BatchNorm2d\n}\n\n\nclass Linear(nn.Module):\n r\"\"\"Model with Linear layers, in Sequential and outside, without biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(16, 16, bias=False)\n )\n self.linear = nn.Linear(16, 16, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass LinearB(nn.Module):\n r\"\"\"Model with Linear layers, in Sequential and outside, with biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(16, 16, bias=True)\n )\n self.linear = nn.Linear(16, 16, bias=True)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass MultipleLinear(nn.Module):\n r\"\"\"Model with multiple Linear layers, in Sequential and outside, without biases\n and with activation functions\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(7, 5, bias=False),\n nn.ReLU(),\n nn.Linear(5, 8, bias=False),\n nn.ReLU(),\n nn.Linear(8, 6, bias=False)\n )\n self.linear = nn.Linear(6, 4, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass MultipleLinearB(nn.Module):\n r\"\"\"Model with multiple Linear layers, in Sequential and outside, with biases\n and with activation functions\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(7, 5, bias=True),\n nn.ReLU(),\n nn.Linear(5, 8, bias=True),\n nn.ReLU(),\n nn.Linear(8, 6, bias=True)\n )\n self.linear = nn.Linear(6, 4, bias=True)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass MultipleLinearMixed(nn.Module):\n r\"\"\"Model with multiple Linear layers, in Sequential and outside, some with biases\n and with activation functions\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Linear(7, 5, bias=True),\n nn.ReLU(),\n nn.Linear(5, 8, bias=False),\n nn.ReLU(),\n nn.Linear(8, 6, bias=True)\n )\n self.linear = nn.Linear(6, 4, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.linear(x)\n return x\n\n\nclass Conv2dA(nn.Module):\n r\"\"\"Model with Conv2d layers, in Sequential and outside, without biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=False),\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n return x\n\n\nclass Conv2dB(nn.Module):\n r\"\"\"Model with Conv2d layers, in Sequential and outside, with biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=True),\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=True)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n return x\n\n\nclass Conv2dC(nn.Module):\n r\"\"\"Model with Conv2d layers, in Sequential and outside, 
with and without biases\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=True),\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=False)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n return x\n\n\nclass Conv2dBN(nn.Module):\n r\"\"\"Model with Conv2d layers and BatchNorms\"\"\"\n def __init__(self):\n super().__init__()\n self.seq = nn.Sequential(\n nn.Conv2d(1, 32, 3, 1, bias=True),\n nn.BatchNorm2d(32)\n )\n self.conv2d = nn.Conv2d(32, 64, 3, 1, bias=True)\n self.bn = nn.BatchNorm2d(64)\n\n def forward(self, x):\n x = self.seq(x)\n x = self.conv2d(x)\n x = self.bn(x)\n return x\n\n\nclass SimplePruner(BasePruner):\n def update_mask(self, layer, **kwargs):\n layer.parametrizations.weight[0].pruned_outputs.add(1)\n\n\nclass MultiplePruner(BasePruner):\n def update_mask(self, layer, **kwargs):\n layer.parametrizations.weight[0].pruned_outputs.update([1, 2])\n\n\nclass TestBasePruner(TestCase):\n def _check_pruner_prepared(self, model, pruner, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n # Check mask exists\n assert hasattr(module, 'mask')\n # Check parametrization exists and is correct\n assert parametrize.is_parametrized(module)\n assert hasattr(module, \"parametrizations\")\n # Assume that this is the 1st/only parametrization\n if isinstance(module, tuple(NEEDS_ZEROS)):\n assert type(module.parametrizations.weight[0]) == ZeroesParametrization\n else:\n assert type(module.parametrizations.weight[0]) == PruningParametrization\n\n def _check_pruner_mask_squashed(self, model, pruner, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n assert not hasattr(module, \"parametrizations\")\n assert not hasattr(module, 'mask')\n\n def _check_pruner_valid_before_step(self, model, pruner, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n assert module.parametrizations.weight[0].pruned_outputs == set()\n\n def _check_pruner_valid_after_step(self, model, pruner, pruned_set, device):\n for config in pruner.module_groups:\n modules = []\n if type(config['module']) is tuple:\n for module in config['module']:\n modules.append(module)\n else:\n module = config['module']\n modules.append(module)\n for module in modules:\n assert module.weight.device.type == device.type\n assert module.parametrizations.weight[0].pruned_outputs == pruned_set\n\n def _test_constructor_on_device(self, model, device):\n self.assertRaisesRegex(TypeError, 'BasePruner .* update_mask',\n BasePruner)\n model1 = copy.deepcopy(model).to(device)\n pruner = SimplePruner(None)\n pruner.prepare(model1, None)\n for g in pruner.module_groups:\n module = g['module']\n assert module.weight.device.type == device.type\n assert len(pruner.module_groups) == 2\n pruner.step()\n # Can instantiate the model with configs\n model2 = 
copy.deepcopy(model).to(device)\n pruner = SimplePruner({'test': 3})\n pruner.prepare(model2, [model2.linear])\n assert len(pruner.module_groups) == 1\n assert pruner.module_groups[0]['fqn'] == 'linear'\n assert 'test' in pruner.module_groups[0]\n assert pruner.module_groups[0]['test'] == 3\n\n def test_constructor(self):\n model = Linear()\n for device in DEVICES:\n self._test_constructor_on_device(model, torch.device(device))\n\n def _test_prepare_linear_on_device(self, model, device):\n model = copy.deepcopy(model).to(device)\n x = torch.ones(128, 16, device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, None)\n self._check_pruner_prepared(model, pruner, device)\n assert model(x).shape == (128, 16)\n\n def test_prepare_linear(self):\n models = [Linear(), LinearB()] # without and with bias\n for device in DEVICES:\n for model in models:\n self._test_prepare_linear_on_device(model, torch.device(device))\n\n def _test_prepare_conv2d_on_device(self, model, config, device):\n x = torch.ones((1, 1, 28, 28), device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, config)\n self._check_pruner_prepared(model, pruner, device)\n assert model(x).shape == (1, 64, 24, 24)\n\n def test_prepare_conv2d(self):\n bn_model = Conv2dBN()\n bn_config = [(bn_model.seq[0], bn_model.seq[1]), (bn_model.conv2d, bn_model.bn)]\n\n models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]\n configs = [None, None, None, bn_config]\n for device in DEVICES:\n for model, config in zip(models, configs):\n model = model.to(device)\n self._test_prepare_conv2d_on_device(model, config, torch.device(device))\n\n def _test_squash_mask_linear_on_device(self, model, device):\n model = copy.deepcopy(model).to(device)\n x = torch.ones(128, 16, device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, None)\n pruner.squash_mask()\n self._check_pruner_mask_squashed(model, pruner, device)\n assert model(x).shape == (128, 16)\n\n def test_squash_mask_linear(self):\n models = [Linear(), LinearB()] # without and with bias\n for device in DEVICES:\n for model in models:\n self._test_squash_mask_linear_on_device(model, torch.device(device))\n\n def _test_squash_mask_conv2d_on_device(self, model, config, device):\n model = copy.deepcopy(model).to(device)\n x = torch.ones((1, 1, 28, 28), device=device)\n pruner = SimplePruner(None)\n pruner.prepare(model, config)\n pruner.squash_mask()\n self._check_pruner_mask_squashed(model, pruner, device)\n assert model(x).shape == (1, 64, 24, 24)\n\n def test_squash_mask_conv2d(self):\n bn_model = Conv2dBN()\n bn_config = [(bn_model.seq[0], bn_model.seq[1]), (bn_model.conv2d, bn_model.bn)]\n\n models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]\n configs = [None, None, None, bn_config]\n for device in DEVICES:\n for model, config in zip(models, configs):\n model = model.to(device)\n self._test_squash_mask_conv2d_on_device(model, config, torch.device(device))\n\n def _test_step_linear_on_device(self, model, is_basic, device):\n model = model.to(device)\n if is_basic:\n x = torch.ones(16, 16)\n pruner = SimplePruner(None)\n pruner.prepare(model, None)\n self._check_pruner_valid_before_step(model, pruner, device)\n pruner.step()\n self._check_pruner_valid_after_step(model, pruner, {1}, device)\n else:\n x = torch.ones(7, 7)\n pruner = MultiplePruner(None)\n pruner.prepare(model, None)\n self._check_pruner_valid_before_step(model, pruner, device)\n pruner.step()\n self._check_pruner_valid_after_step(model, pruner, {1, 2}, device)\n\n def test_step_linear(self):\n 
basic_models = [Linear(), LinearB()]\n complex_models = [MultipleLinear(), MultipleLinearB(), MultipleLinearMixed()]\n for device in DEVICES:\n for model in basic_models:\n self._test_step_linear_on_device(model, True, torch.device(device))\n for model in complex_models:\n self._test_step_linear_on_device(model, False, torch.device(device))\n\n def _test_step_conv2d_on_device(self, model, config, device):\n model = model.to(device)\n x = torch.ones((1, 1, 28, 28)).to(device)\n pruner = SimplePruner(None)\n pruner.prepare(model, config)\n self._check_pruner_valid_before_step(model, pruner, device)\n pruner.step()\n if type(model) is Conv2dBN:\n assert pruner.get_module_pruned_outputs(model.seq[1]) == pruner.get_module_pruned_outputs(model.seq[0])\n assert pruner.get_module_pruned_outputs(model.bn) == pruner.get_module_pruned_outputs(model.conv2d)\n self._check_pruner_valid_after_step(model, pruner, {1}, device)\n assert model(x).shape == (1, 64, 24, 24)\n\n def test_step_conv2d(self):\n bn_model = Conv2dBN()\n bn_config = [(bn_model.seq[0], bn_model.seq[1]),\n (bn_model.conv2d, bn_model.bn)]\n\n models = [Conv2dA(), Conv2dB(), Conv2dC(), bn_model]\n configs = [None, None, None, None, bn_config]\n for device in DEVICES:\n for model, config in zip(models, configs):\n self._test_step_conv2d_on_device(model, config, torch.device(device))\n",
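The pruning tests above lean entirely on torch.nn.utils.parametrize. A minimal sketch of that mechanism follows, with ZeroPruning as a hypothetical stand-in for torch.ao.sparsity's ZeroesParametrization (it zeroes pruned output rows rather than removing them, as the NEEDS_ZEROS comment describes):

import torch
from torch import nn
from torch.nn.utils import parametrize

class ZeroPruning(nn.Module):
    """Hypothetical stand-in: zero out pruned output rows on every weight access."""
    def __init__(self):
        super().__init__()
        self.pruned_outputs = set()

    def forward(self, W):
        W = W.clone()
        for i in self.pruned_outputs:
            W[i] = 0
        return W

linear = nn.Linear(16, 16, bias=False)
parametrize.register_parametrization(linear, "weight", ZeroPruning())
linear.parametrizations.weight[0].pruned_outputs.add(1)   # "prune" output row 1
print(linear.weight[1].abs().sum())                       # tensor(0., ...)
parametrize.remove_parametrizations(linear, "weight")     # bake the mask in
assert not parametrize.is_parametrized(linear)

remove_parametrizations leaves the zeroed values in a plain Parameter, which is the state the _check_pruner_mask_squashed assertions above expect after squash_mask.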
"# Owner(s): [\"oncall: fx\"]\n\nimport torch\nimport torch.fx.experimental.fx_acc.acc_ops as acc_ops\nimport torch.nn as nn\nfrom torch.testing._internal.common_fx2trt import AccTestCase\nfrom parameterized import parameterized\n\n\nclass TestGetitemConverter(AccTestCase):\n @parameterized.expand(\n [\n (\"slice_batch_dim\", slice(None, None, None)),\n (\"slice_basic\", (slice(None, None, None), slice(0, 3, 2))),\n (\"slice_full\", (slice(None, None, None), slice(0, 10, 3))),\n (\"ellipsis\", (slice(None, None, None), ..., slice(0, 3, 2))),\n (\n \"slice_all_none\",\n (slice(None, None, None), slice(None, None, None)),\n ),\n (\n \"slice_start_none\",\n (slice(None, None, None), slice(None, 2, 1)),\n ),\n (\"slice_end_none\", (slice(None, None, None), slice(1, None, 1))),\n (\n \"slice_step_none\",\n (slice(None, None, None), slice(0, 3, None)),\n ),\n (\"slice_neg_idx\", (slice(None, None, None), -1)),\n (\"slice_neg_slice\", (slice(None, None, None), slice(-8, -2, 3))),\n (\"multi_dim\", (slice(None, None, None), 0, 1)),\n (\n \"slice_multi_dim\",\n (slice(None, None, None), slice(0, 3, 2), slice(1, -1, 3)),\n ),\n (\n \"none\",\n (slice(None, None, None), None, slice(1, -1, 3), 1),\n ),\n ]\n )\n def test_getitem(self, _, idx):\n class Getitem(nn.Module):\n def __init__(self, idx):\n super().__init__()\n self.idx = idx\n\n def forward(self, x):\n x = x + x\n return x[self.idx]\n\n inputs = [torch.randn(2, 10, 10, 10)]\n self.run_test(Getitem(idx), inputs, expected_ops={acc_ops.getitem})\n",
"# Owner(s): [\"NNC\"]\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom torch import nn\nimport unittest\n\nfrom torch.testing._internal.common_utils import suppress_warnings, num_profiled_runs, run_tests\n\nfrom torch.testing._internal.jit_utils import JitTestCase\n\n\nclass BaseTestClass(JitTestCase):\n def setUp(self):\n self.old_profiling_executor = torch._C._jit_set_profiling_executor(True)\n self.old_profiling_mode = torch._C._jit_set_profiling_mode(True)\n\n self.old_cpu_fuser_state = torch._C._jit_can_fuse_on_cpu()\n self.old_gpu_fuser_state = torch._C._jit_can_fuse_on_gpu()\n torch._C._jit_override_can_fuse_on_cpu(True)\n torch._C._jit_override_can_fuse_on_gpu(True)\n self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()\n torch._C._jit_set_texpr_fuser_enabled(True)\n self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()\n torch._C._debug_set_fusion_group_inlining(False)\n self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()\n torch._C._jit_set_te_must_use_llvm_cpu(False)\n\n self.devices = ['cpu'] if not torch.cuda.is_available() else ['cpu', 'cuda']\n\n def tearDown(self):\n torch._C._jit_set_profiling_executor(self.old_profiling_executor)\n torch._C._jit_set_profiling_mode(self.old_profiling_mode)\n\n torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)\n torch._C._jit_override_can_fuse_on_gpu(self.old_gpu_fuser_state)\n torch._C._jit_override_can_fuse_on_cpu(self.old_cpu_fuser_state)\n torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)\n torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)\n\n def assertLastGraphAllFused(self):\n self.assertAllFused(torch.jit.last_executed_optimized_graph())\n\n\ndef warmup_and_run_forward(f, *args):\n for _ in range(torch._C._jit_get_num_profiled_runs() + 1):\n results = f(*args)\n return results\n\n\nclass TestTensorExprFuser(BaseTestClass):\n def test_easy(self):\n def easy(x, y):\n aaa = torch.add(x, y)\n return aaa\n\n traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024)))\n\n a = torch.rand(1024)\n b = torch.rand(1024)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy())\n\n def test_three_arg(self):\n def easy(x, y, z):\n aaa = torch.add(x, y)\n bbb = torch.add(aaa, z)\n return bbb\n\n traced = torch.jit.trace(\n easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))\n )\n\n a = torch.rand(1024)\n b = torch.rand(1024)\n c = torch.rand(1024)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n npr = a.numpy() + b.numpy() + c.numpy()\n np.testing.assert_allclose(npr, x.numpy())\n\n def test_four_arg(self):\n def run_addcmul(x, y, z, w):\n c = torch.addcmul(torch.add(x, y), z, w)\n return c\n\n for dev in self.devices:\n rand_a = torch.rand(1024, dtype=torch.float, device=dev)\n rand_b = torch.rand(1024, dtype=torch.float, device=dev)\n rand_c = torch.rand(1024, dtype=torch.float, device=dev)\n rand_d = torch.rand(1024, dtype=torch.float, device=dev)\n\n traced = torch.jit.trace(\n run_addcmul,\n (\n torch.zeros(1024, dtype=torch.float, device=dev),\n torch.zeros(1024, dtype=torch.float, device=dev),\n torch.zeros(1024, dtype=torch.float, device=dev),\n torch.zeros(1024, dtype=torch.float, device=dev),\n ),\n )\n\n x = warmup_and_run_forward(traced, rand_a, rand_b, rand_c, rand_d)\n self.assertLastGraphAllFused()\n y = run_addcmul(rand_a, rand_b, rand_c, rand_d)\n 
np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=1e-6)\n\n def test_three_arg2(self):\n for device in self.devices:\n def test(x, y, z):\n aaa = torch.add(x, y)\n bbb = torch.add(aaa, z)\n return bbb\n\n M = 32\n N = 32\n traced = torch.jit.trace(\n test,\n (\n torch.rand(M, N, device=device),\n torch.rand(M, N, device=device),\n torch.rand(M, N, device=device),\n ),\n )\n\n a = torch.rand(M, N, device=device)\n b = torch.rand(M, N, device=device)\n c = torch.rand(M, N, device=device)\n x = traced(a, b, c)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy()\n np.testing.assert_allclose(npr, x.cpu().numpy())\n\n def test_broadcast3(self):\n for device in self.devices:\n def test_body(M, N, L, K):\n def test(x, y, z):\n v1 = torch.add(x, y)\n v2 = torch.add(v1, z)\n return v2\n\n a_shape = [M, N]\n b_shape = [L, M, 1]\n c_shape = [K, L, 1, 1]\n traced = torch.jit.trace(\n test,\n (\n torch.rand(*a_shape, device=device),\n torch.rand(*b_shape, device=device),\n torch.rand(*c_shape, device=device),\n ),\n )\n\n a = torch.rand(*a_shape, device=device)\n b = torch.rand(*b_shape, device=device)\n c = torch.rand(*c_shape, device=device)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n npr = a.cpu().numpy() + b.cpu().numpy() + c.cpu().numpy()\n np.testing.assert_allclose(npr, x.cpu().numpy())\n\n test_configs = [[5, 2, 7, 3], [8, 8, 8, 8]]\n for test_config in test_configs:\n test_body(*test_config)\n\n def test_all_combos(self):\n def easy(x, y, z):\n a = torch.add(x, y)\n b = torch.add(a, z)\n c = torch.add(x, b)\n d = torch.add(c, a)\n return d\n\n def np_easy(x, y, z):\n a = x + y\n b = a + z\n c = x + b\n d = c + a\n return d\n\n traced = torch.jit.trace(\n easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))\n )\n\n a = torch.rand(1024)\n b = torch.rand(1024)\n c = torch.rand(1024)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n npr = np_easy(a.numpy(), b.numpy(), c.numpy())\n np.testing.assert_allclose(npr, x.numpy())\n\n def test_rank_two(self):\n def easy(x, y, z):\n a = torch.add(x, y)\n b = torch.add(a, z)\n c = torch.add(x, b)\n d = torch.add(c, a)\n return d\n\n def np_easy(x, y, z):\n a = x + y\n b = a + z\n c = x + b\n d = c + a\n return d\n\n shape = 32, 32\n traced = torch.jit.trace(\n easy, (torch.rand(shape), torch.rand(shape), torch.rand(shape))\n )\n\n a = torch.rand(shape)\n b = torch.rand(shape)\n c = torch.rand(shape)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n npr = np_easy(a.numpy(), b.numpy(), c.numpy())\n np.testing.assert_allclose(npr, x.numpy())\n\n def test_broadcast(self):\n def easy(x, y, z):\n a = torch.add(x, y)\n b = torch.add(a, z)\n return b\n\n def np_easy(x, y, z):\n a = x + y\n b = a + z\n return b\n\n N = 32\n traced = torch.jit.trace(easy, (torch.rand(N, N), torch.rand(N), torch.rand(N, N)))\n\n a = torch.rand(N, N)\n b = torch.rand(N)\n c = torch.rand(N, N)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n npr = np_easy(a.numpy(), b.numpy(), c.numpy())\n np.testing.assert_allclose(npr, x.numpy())\n\n def test_broadcast_2(self):\n zero = torch.tensor([0.0], dtype=torch.float)\n\n def foo(x, y, z):\n aaa = torch.add(x, y)\n bbb = torch.add(zero, aaa)\n return torch.add(bbb, z)\n\n def foo_np(x, y, z):\n a = x + y\n b = zero.numpy() + a\n return b + z\n\n x = torch.rand(3, 4)\n y = torch.ones(3, 1)\n z = 
torch.rand(4)\n traced = torch.jit.trace(foo, (x, y, z))\n\n r = warmup_and_run_forward(traced, x, y, z)\n self.assertLastGraphAllFused()\n\n rnp = foo_np(x.numpy(), y.numpy(), z.numpy())\n np.testing.assert_allclose(r, rnp)\n\n def test_broadcast_big2(self):\n zero = torch.tensor([0.0], dtype=torch.float)\n\n def foo(x, y, z):\n aaa = torch.add(x, y)\n bbb = torch.add(zero, aaa)\n return torch.add(bbb, z)\n\n def foo_np(x, y, z):\n a = x + y\n b = zero.numpy() + a\n return b + z\n\n x = torch.rand(32, 1024)\n y = torch.ones(32, 1)\n z = torch.rand(1024)\n traced = torch.jit.trace(foo, (x, y, z))\n\n r = warmup_and_run_forward(traced, x, y, z)\n self.assertLastGraphAllFused()\n rnp = foo_np(x.numpy(), y.numpy(), z.numpy())\n np.testing.assert_allclose(r, rnp)\n\n def test_alpha(self):\n def alpha(x):\n aaa = torch.add(x, x, alpha=2.0)\n return aaa\n\n traced = torch.jit.trace(alpha, (torch.tensor([1.0])))\n\n a = torch.tensor([1.0])\n x = traced(a)\n np.testing.assert_allclose(a.numpy() + 2.0 * a.numpy(), x.numpy())\n\n @suppress_warnings\n def test_constant(self):\n def constant(x):\n bbb = torch.tensor([1.0])\n aaa = torch.add(x, bbb)\n return aaa\n\n traced = torch.jit.trace(constant, (torch.tensor([1.0])))\n\n a = torch.tensor([1.0])\n x = warmup_and_run_forward(traced, a)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(a.numpy() + 1.0, x.numpy())\n\n def test_add_sub(self):\n def easy(x, y, z):\n aaa = torch.add(x, y)\n bbb = torch.sub(aaa, z)\n return bbb\n\n traced = torch.jit.trace(\n easy, (torch.rand(1024), torch.rand(1024), torch.rand(1024))\n )\n\n a = torch.rand(1024)\n b = torch.rand(1024)\n c = torch.rand(1024)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(a.numpy() + b.numpy() - c.numpy(), x.numpy())\n\n def test_promotion(self):\n def easy(x, y):\n aaa = torch.add(x, y)\n return aaa\n\n traced = torch.jit.trace(\n easy,\n (torch.zeros(1024, dtype=torch.int32), torch.rand(1024, dtype=torch.float32)),\n )\n\n a = torch.zeros(1024, dtype=torch.int32)\n b = torch.rand(1024, dtype=torch.float32)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(a.numpy() + b.numpy(), x.numpy())\n\n def test_double(self):\n TENSOR_LEN = 8\n\n def easy(x, y):\n aaa = torch.add(x, y)\n bbb = torch.mul(aaa, y)\n return bbb\n\n traced = torch.jit.trace(\n easy,\n (torch.rand(TENSOR_LEN, dtype=torch.float64), torch.full((TENSOR_LEN,), 0.5, dtype=torch.float64)),\n )\n\n a = torch.rand(TENSOR_LEN, dtype=torch.double)\n b = torch.full((TENSOR_LEN,), 0.5, dtype=torch.double)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())\n\n def test_short(self):\n TENSOR_LEN = 8\n\n def easy(x, y):\n aaa = torch.add(x, y)\n bbb = torch.mul(aaa, y)\n return bbb\n\n traced = torch.jit.trace(\n easy,\n (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16),\n torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)),\n )\n\n a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)\n b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int16)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())\n\n def test_char(self):\n TENSOR_LEN = 8\n\n def easy(x, y):\n aaa = torch.add(x, y)\n bbb = torch.mul(aaa, y)\n return bbb\n\n traced = torch.jit.trace(\n easy,\n 
(torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8),\n torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)),\n )\n\n a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)\n b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())\n\n def test_int64_promotion(self):\n TENSOR_LEN = 8\n\n def easy(x, y):\n aaa = torch.add(x, y)\n bbb = torch.mul(aaa, y)\n return bbb\n\n traced = torch.jit.trace(\n easy,\n (torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8),\n torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64)),\n )\n\n a = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int8)\n b = torch.randint(TENSOR_LEN, (TENSOR_LEN,), dtype=torch.int64)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose((a.numpy() + b.numpy()) * b.numpy(), x.numpy())\n\n def test_eq(self):\n def easy(x, y):\n c = torch.eq(x, y)\n return c\n\n traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))\n a = torch.zeros(1024, dtype=torch.int32)\n b = torch.zeros(1024, dtype=torch.int32)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(np.ones(1024), x.numpy())\n\n def test_ne(self):\n def easy(x, y):\n c = torch.ne(x, y)\n return c\n\n traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))\n a = torch.zeros(1024, dtype=torch.int32)\n b = torch.ones(1024, dtype=torch.int32)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(np.ones(1024), x.numpy())\n\n def test_ge(self):\n def easy(x, y):\n c = torch.ge(x, y)\n return c\n\n traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))\n aa = np.empty([1024], dtype=np.int32)\n aa.fill(5)\n a = torch.from_numpy(aa)\n b = torch.zeros(1024, dtype=torch.int32)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(np.ones(1024), x.numpy())\n\n def test_gt(self):\n def easy(x, y):\n c = torch.gt(x, y)\n return c\n\n traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))\n a = torch.ones(1024, dtype=torch.int32)\n b = torch.zeros(1024, dtype=torch.int32)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(np.ones(1024), x.numpy())\n\n def test_le(self):\n def easy(x, y):\n c = torch.le(x, y)\n return c\n\n traced = torch.jit.trace(easy, (torch.zeros(1024), torch.zeros(1024)))\n aa = np.empty([1024], dtype=np.int32)\n aa.fill(5)\n a = torch.from_numpy(aa)\n b = torch.zeros(1024, dtype=torch.int32)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(np.zeros(1024), x.numpy())\n\n def test_lt(self):\n def easy(x, y):\n c = torch.lt(x, y)\n return c\n\n for dev in self.devices:\n traced = torch.jit.trace(easy, (torch.zeros(1024, device=dev), torch.zeros(1024, device=dev)))\n a = torch.ones(1024, dtype=torch.int32, device=dev)\n b = torch.zeros(1024, dtype=torch.int32, device=dev)\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(np.zeros(1024), x.cpu().numpy())\n\n @suppress_warnings\n def test_min_max(self):\n def test(x, y):\n return torch.max(torch.min(x, y), torch.tensor([4.0]))\n\n traced = torch.jit.trace(test, (torch.zeros(1024), torch.zeros(1024)))\n a = 
8.0 * torch.rand(1024)\n b = 8.0 * torch.rand(1024)\n np.testing.assert_allclose(\n warmup_and_run_forward(traced, a, b), np.maximum(np.minimum(a.numpy(), b.numpy()), [4.0])\n )\n self.assertLastGraphAllFused()\n\n def test_min_max_reduction(self):\n def test(x):\n return torch.min(x) + torch.max(x)\n\n traced = torch.jit.trace(test, (torch.zeros(1024)))\n a = 8.0 * torch.rand(1024)\n np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(a.numpy()) + np.amax(a.numpy()))\n self.assertLastGraphAllFused()\n\n def test_min_max_reduction2(self):\n def test(x):\n return x.min() + x.max()\n\n traced = torch.jit.trace(test, (torch.zeros(1024)))\n a = 8.0 * torch.rand(1024)\n np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(a.numpy()) + np.amax(a.numpy()))\n self.assertLastGraphAllFused()\n\n def test_min_max_reduction_dim1(self):\n def test(x):\n return torch.min(x, 1)[0] + torch.max(x, 1)[0]\n\n traced = torch.jit.trace(test, (torch.zeros(16, 16)))\n a = 8.0 * torch.rand(16, 16)\n np.testing.assert_allclose(warmup_and_run_forward(traced, a), np.amin(\n a.numpy(), axis=1) + np.amax(a.numpy(), axis=1))\n self.assertLastGraphAllFused()\n\n def test_min_max_reduction_dim1_2(self):\n def test(x):\n return torch.min(x * x, 1)\n\n traced = torch.jit.trace(test, (torch.zeros(16, 16)))\n a = 8.0 * torch.rand(16, 16)\n np.testing.assert_allclose(warmup_and_run_forward(traced, a)[0], np.amin((a * a).numpy(), axis=1))\n self.assertLastGraphAllFused()\n\n def test_clamp(self):\n def test(x):\n return torch.clamp(x + 3.0, 0.0, 6.0)\n\n for dev in self.devices:\n traced = torch.jit.trace(test, (torch.zeros(1024, device=dev)))\n a = 20.0 * torch.rand(1024, device=dev) - 10.0\n an = a.cpu().numpy()\n np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip(an + 3.0, 0.0, 6.0))\n self.assertLastGraphAllFused()\n\n def test_relu(self):\n def test(x):\n return torch.clamp(F.relu(x), 0, 0.5)\n\n for dev in self.devices:\n traced = torch.jit.trace(test, (torch.zeros(1024, device=dev)))\n a = 20.0 * torch.rand(1024, device=dev) - 10.0\n an = a.cpu().numpy()\n np.testing.assert_allclose(warmup_and_run_forward(traced, a).cpu(), np.clip((np.maximum(0, an)), 0, 0.5))\n self.assertLastGraphAllFused()\n\n def test_reps(self):\n def easy(x, y):\n c = torch.add(x, y)\n return c\n\n traced = torch.jit.trace(easy, (torch.rand(1024), torch.rand(1024)))\n\n for _ in range(32):\n a = torch.ones(1024)\n b = torch.zeros(1024)\n x = warmup_and_run_forward(traced, a, b)\n np.testing.assert_allclose(np.ones(1024), x.numpy())\n\n def test_add_const_rhs(self):\n def test(x):\n return x + 3.0\n\n traced = torch.jit.trace(test, torch.rand(4))\n x = torch.rand(4)\n y = warmup_and_run_forward(traced, x)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(x.numpy() + 3.0, y.numpy())\n\n def test_int_output(self):\n def test(x, y, z):\n return x * y * z\n\n xs = [(torch.rand(4) * 3 + 1).to(torch.int32) for i in range(3)]\n x, y, z = xs\n xn, yn, zn = [t.numpy() for t in xs]\n traced = torch.jit.trace(test, (x, y, z))\n res = warmup_and_run_forward(traced, x, y, z)\n self.assertLastGraphAllFused()\n np.testing.assert_allclose(xn * yn * zn, res.numpy())\n\n def test_binary_ops(self):\n def test_atan2(x, y):\n c = torch.atan2(torch.add(x, y), y)\n return c\n\n def test_gt(x, y):\n c = torch.gt(torch.add(x, y), y)\n return c\n\n def test_ge(x, y):\n c = torch.ge(torch.add(x, y), y)\n return c\n\n def test_lt(x, y):\n c = torch.lt(torch.add(x, y), y)\n return c\n\n def test_le(x, 
y):\n c = torch.le(torch.add(x, y), y)\n return c\n\n def test_lerp(x, y):\n c = torch.lerp(torch.add(x, 1), x, 2.0)\n return c\n\n def test_mul(x, y):\n c = torch.mul(torch.add(x, y), y)\n return c\n\n def test_ne(x, y):\n c = torch.ne(torch.add(x, y), y)\n return c\n\n def test_div(x, y):\n c = torch.div(torch.add(x, y), 2)\n return c\n\n def test_eq(x, y):\n c = torch.eq(torch.add(x, y), y)\n return c\n\n def test_fmod(x, y):\n c = torch.fmod(torch.add(x, y), 2)\n return c\n\n def test_sub(x, y):\n c = torch.sub(torch.add(x, y), x)\n return c\n\n def test_remainder(x, y):\n c = torch.remainder(torch.add(x, y), 3.0)\n return c\n\n def test_pow(x, y):\n c = torch.pow(torch.add(x, y), 2.0)\n return c\n\n def test_type_as(x, y):\n return x.type_as(torch.add(x, y))\n\n fns = {\n test_atan2,\n test_gt,\n test_ge,\n test_lt,\n test_le,\n test_lerp,\n test_mul,\n test_ne,\n test_div,\n test_eq,\n test_fmod,\n test_sub,\n test_remainder,\n test_pow,\n test_type_as,\n }\n for torch_fn in fns:\n for dev in self.devices:\n rand_a = torch.rand(1024, device=dev)\n rand_b = torch.rand(1024, device=dev)\n in1 = 20 * torch.rand(1024, device=dev)\n in2 = 20 * torch.rand(1024, device=dev)\n traced = torch.jit.trace(torch_fn, (in1, in2))\n x = warmup_and_run_forward(traced, rand_a, rand_b)\n self.assertLastGraphAllFused()\n y = torch_fn(rand_a, rand_b)\n np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=2e-3)\n\n def test_unary_ops(self):\n def test_cast_float(x, y):\n c = torch.ops.aten._cast_Float(torch.add(x, y))\n return c\n\n def test_round(x, y):\n c = torch.round(torch.add(x, y))\n return c\n\n def test_sin(x, y):\n c = torch.sin(torch.add(x, y))\n return c\n\n def test_asin(x, y):\n c = torch.asin(torch.add(x, y))\n return c\n\n def test_sinh(x, y):\n c = torch.sinh(torch.add(x, y))\n return c\n\n def test_cos(x, y):\n c = torch.cos(torch.add(x, y))\n return c\n\n def test_acos(x, y):\n c = torch.acos(torch.add(x, y))\n return c\n\n def test_cosh(x, y):\n c = torch.cosh(torch.add(x, y))\n return c\n\n def test_tan(x, y):\n c = torch.tan(torch.add(x, y))\n return c\n\n def test_atan(x, y):\n c = torch.atan(torch.add(x, y))\n return c\n\n def test_tanh(x, y):\n c = torch.tanh(torch.add(x, y))\n return c\n\n def test_sqrt(x, y):\n c = torch.sqrt(torch.add(x, y))\n return c\n\n def test_rsqrt(x, y):\n c = torch.rsqrt(torch.add(x, y))\n return c\n\n def test_floor(x, y):\n c = torch.floor(torch.add(x, y))\n return c\n\n def test_ceil(x, y):\n c = torch.ceil(torch.add(x, y))\n return c\n\n def test_trunc(x, y):\n c = torch.trunc(torch.add(x, y))\n return c\n\n def test_abs(x, y):\n c = torch.abs(torch.add(x, y))\n return c\n\n def test_log(x, y):\n c = torch.log(torch.add(x, y))\n return c\n\n def test_log2(x, y):\n c = torch.log2(torch.add(x, y))\n return c\n\n def test_log10(x, y):\n c = torch.log10(torch.add(x, y))\n return c\n\n def test_log1p(x, y):\n c = torch.log1p(torch.add(x, y))\n return c\n\n def test_rqrt(x, y):\n c = torch.rsqrt(torch.add(x, y))\n return c\n\n def test_erf(x, y):\n c = torch.erf(torch.add(x, y))\n return c\n\n def test_exp(x, y):\n c = torch.exp(torch.add(x, y))\n return c\n\n def test_expm1(x, y):\n c = torch.expm1(torch.add(x, y))\n return c\n\n def test_erfc(x, y):\n c = torch.erfc(torch.add(x, y))\n return c\n\n def test_frac(x, y):\n c = torch.frac(torch.add(x, y))\n return c\n\n def test_lgamma(x, y):\n c = torch.lgamma(torch.add(x, y))\n return c\n\n def test_sigmoid(x, y):\n c = torch.sigmoid(torch.add(x, y))\n return c\n\n def test_reciprocal(x, 
y):\n c = torch.reciprocal(torch.add(x, y))\n return c\n\n def test_neg(x, y):\n c = torch.neg(torch.add(x, y))\n return c\n\n def test_relu(x, y):\n c = torch.relu(torch.add(x, y))\n return c\n\n def test_hardtanh(x, y):\n c = F.hardtanh(torch.add(x, y), -1.0, 1.0)\n return c\n\n def test_threshold(x, y):\n c = F.threshold(torch.add(x, y), 0.5, 10)\n return c\n\n fns = {\n test_round,\n test_sin,\n test_asin,\n test_sinh,\n test_cos,\n test_acos,\n test_cosh,\n test_tan,\n test_atan,\n test_sqrt,\n test_floor,\n test_ceil,\n test_trunc,\n test_abs,\n test_log,\n test_log2,\n test_log10,\n test_log1p,\n test_rsqrt,\n test_exp,\n test_expm1,\n test_erf,\n test_erfc,\n test_frac,\n test_lgamma,\n test_reciprocal,\n test_neg,\n test_threshold,\n test_relu,\n test_tanh,\n test_hardtanh,\n test_sigmoid,\n }\n\n for torch_fn in fns:\n for dev in self.devices:\n # print(torch_fn, dev)\n rand_a = torch.rand(1024, device=dev)\n rand_b = torch.rand(1024, device=dev)\n ins = 20 * torch.rand(1024, device=dev)\n cc = np.empty([1024], dtype=np.float32)\n cc.fill(np.nan)\n nans = torch.from_numpy(cc).to(dev)\n traced = torch.jit.trace(torch_fn, (ins, ins))\n x = warmup_and_run_forward(traced, rand_a, rand_b)\n self.assertLastGraphAllFused()\n y = torch_fn(rand_a, rand_b)\n np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy(), atol=2e-3)\n # nans\n # TODO: reenable. Currently all of the tests fail\n # traced = torch.jit.trace(torch_fn, (ins, ins))\n # x = warmup_and_run_forward(traced, rand_a, rand_b)\n # y = torch_fn(nans, rand_b)\n # try:\n # np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())\n # print(\"Succeeded on dev=\", dev, \"function=\", torch_fn)\n # except AssertionError:\n # # Print extra info before exiting:\n # print(\"Failed on dev=\", dev, \"function=\", torch_fn)\n # # np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())\n\n def test_rand_like(self):\n N = 1 << 16\n\n def run_rand_like(x, y):\n return torch.rand_like(torch.add(x, y))\n\n for device in self.devices:\n x = torch.rand(N, device=device)\n traced = torch.jit.trace(run_rand_like, (x, x), check_trace=False)\n x_v = warmup_and_run_forward(traced, x, x)\n self.assertLastGraphAllFused()\n x_np = x.cpu().numpy()\n x1_mean = np.mean(x_np)\n x2_mean = np.mean(x_np ** 2)\n x3_mean = np.mean(x_np ** 3)\n np.testing.assert_allclose(x1_mean, 1. / 2, rtol=2e-2)\n np.testing.assert_allclose(x2_mean, 1. / 3, rtol=2e-2)\n np.testing.assert_allclose(x3_mean, 1. 
/ 4, rtol=2e-2)\n\n def test_nans(self):\n def test_max(x, y):\n return torch.max(2 * x, 2 * y)\n\n def test_min(x, y):\n return torch.min(2 * x, 2 * y)\n\n tmax = torch.jit.trace(test_max, (torch.rand(1), torch.rand(1)))\n tmin = torch.jit.trace(test_min, (torch.rand(1), torch.rand(1)))\n\n x = torch.tensor([np.nan])\n y = torch.tensor([1.0])\n\n assert np.isnan(warmup_and_run_forward(tmin, x, y).item())\n assert np.isnan(warmup_and_run_forward(tmin, y, x).item())\n self.assertLastGraphAllFused()\n assert np.isnan(warmup_and_run_forward(tmax, x, y).item())\n assert np.isnan(warmup_and_run_forward(tmax, y, x).item())\n self.assertLastGraphAllFused()\n\n def test_double_intrinsics(self):\n def do_pow(x):\n return torch.pow(x, 7)\n\n for device in self.devices:\n x = torch.rand(10, dtype=torch.double, device=device)\n traced = torch.jit.trace(do_pow, (x))\n x = warmup_and_run_forward(traced, x)\n self.assertLastGraphAllFused()\n\n def test_remainder(self):\n def run_remainder(x, y):\n c = torch.remainder(torch.add(x, y), x)\n return c\n\n a = torch.rand(1024, dtype=float)\n b = torch.rand(1024, dtype=float)\n zeros = torch.zeros(1024, dtype=float)\n cc = np.array(1024, dtype=float)\n cc.fill(np.nan)\n nans = torch.from_numpy(cc)\n\n # random floats\n traced = torch.jit.trace(run_remainder, (torch.zeros(1024), torch.zeros(1024)))\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n y = run_remainder(a, b)\n np.testing.assert_allclose(x.numpy(), y.numpy())\n\n # div by 0\n traced = torch.jit.trace(run_remainder, (torch.zeros(1024), torch.zeros(1024)))\n x = warmup_and_run_forward(traced, zeros, a)\n self.assertLastGraphAllFused()\n y = run_remainder(zeros, a)\n np.testing.assert_allclose(x.numpy(), y.numpy())\n\n # numerators and denominatos are nan\n traced = torch.jit.trace(run_remainder, (torch.zeros(1024), torch.zeros(1024)))\n x = warmup_and_run_forward(traced, nans, a)\n self.assertLastGraphAllFused()\n y = run_remainder(nans, a)\n np.testing.assert_allclose(x.numpy(), y.numpy())\n\n def test_multioutput(self):\n def easy(x):\n b = x + 1\n c = b + b\n return (b, c)\n\n traced = torch.jit.trace(easy, (torch.zeros(1024)))\n\n a = torch.zeros(1024)\n b, c = warmup_and_run_forward(traced, a)\n self.assertLastGraphAllFused()\n bp = a.numpy() + 1\n cp = bp + bp\n np.testing.assert_allclose(b.numpy(), bp)\n np.testing.assert_allclose(c.numpy(), cp)\n\n def test_chunk(self):\n def easy(x):\n y = x + 1\n aaa, bbb = torch.chunk(y, 2)\n return aaa + bbb\n\n traced = torch.jit.trace(easy, (torch.zeros(1024, 1024)))\n\n a = torch.zeros(32, 32)\n x = warmup_and_run_forward(traced, a)\n self.assertLastGraphAllFused()\n npr = a.numpy()\n npr2 = npr + 1\n npr_a, npr_b = np.array_split(npr2, 2)\n np.testing.assert_allclose(npr_a + npr_b, x.numpy())\n\n def test_cat(self):\n for device in self.devices:\n def foo(*args):\n args_2 = [v + i for i, v in enumerate(args)]\n v = torch.cat(args_2, dim=1)\n return v * v\n\n M = 16\n Ns = [128, 16, 1]\n values = [torch.zeros(M, N, device=device) for N in Ns]\n traced = torch.jit.trace(foo, values)\n\n x = warmup_and_run_forward(traced, *values)\n self.assertLastGraphAllFused()\n ref = foo(*values)\n np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())\n\n # This test checks that we correctly handle fusion group with just aten::cat in it.\n # Note that the test only makes sense with min_fusion_group=1, otherwise no\n # fusion groups would be formed at all.\n # TODO: Fix and re-enable the test.\n @unittest.skip(\"cat is broken with 
fusion group inlining disabled\")\n def test_cat_only(self):\n for device in self.devices:\n def foo(*args):\n args_2 = [v + i for i, v in enumerate(args)]\n v = torch.cat(args_2, dim=1)\n return v\n\n M = 16\n Ns = [128, 16, 1]\n values = [torch.zeros(M, N, device=device) for N in Ns]\n traced = torch.jit.trace(foo, values)\n\n x = warmup_and_run_forward(traced, *values)\n self.assertLastGraphAllFused()\n ref = foo(*values)\n np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())\n\n def test_cat_negative_dim(self):\n for device in self.devices:\n def foo(*args):\n v = torch.cat(args, dim=-1)\n return v * v\n\n M = 16\n Ns = [128, 16, 1]\n values = [torch.randn(M, N, device=device) for N in Ns]\n traced = torch.jit.trace(foo, values)\n\n x = warmup_and_run_forward(traced, *values)\n self.assertLastGraphAllFused()\n ref = foo(*values)\n np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())\n\n def test_cat_promote_inputs(self):\n for device in self.devices:\n def foo(*args):\n v = torch.cat(args, dim=1)\n return v * v\n\n M = 16\n Ns = [128, 16, 1]\n dtypes = [torch.half, torch.float32, torch.double]\n values = [torch.randn(M, N, device=device, dtype=dt) for N, dt in zip(Ns, dtypes)]\n traced = torch.jit.trace(foo, values)\n\n x = warmup_and_run_forward(traced, *values)\n self.assertLastGraphAllFused()\n ref = foo(*values)\n np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())\n\n def test_cat_empty_tensors(self):\n for device in self.devices:\n def foo(*args):\n v = torch.cat(args, dim=1)\n return v * v\n\n M = 16\n Ns = [128, 16, 1]\n empty = torch.tensor([], device=device, dtype=torch.double)\n values = [empty] + [torch.randn(M, N, device=device) for N in Ns]\n traced = torch.jit.trace(foo, values)\n\n x = warmup_and_run_forward(traced, *values)\n self.assertLastGraphAllFused()\n ref = foo(*values)\n np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())\n\n # now test with only empty tensors\n values = [empty for i in range(3)]\n traced = torch.jit.trace(foo, values)\n x = warmup_and_run_forward(traced, *values)\n self.assertLastGraphAllFused()\n ref = foo(*values)\n np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())\n\n def test_cat_with_constant_dim(self):\n for device in self.devices:\n def foo(*args):\n v1 = torch.cat(args, dim=1)\n v2 = torch.cat([v1], dim=1)\n return v2 * v2\n\n empty = torch.tensor([], device=device, dtype=torch.float32)\n inputs = [empty] + [torch.randn(1, 64, device=device), torch.randn(1, 64, device=device)]\n traced = torch.jit.trace(foo, inputs)\n\n x = warmup_and_run_forward(traced, *inputs)\n self.assertLastGraphAllFused()\n ref = foo(*inputs)\n np.testing.assert_allclose(ref.cpu().numpy(), x.cpu().numpy())\n\n def test_scalar(self):\n @torch.jit.script\n def test_float(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: float, b: float) -> torch.Tensor:\n return torch.add(torch.add(x, y, alpha=a), z, alpha=b)\n\n @torch.jit.script\n def test_int(x: torch.Tensor, y: torch.Tensor, z: torch.Tensor, a: int, b: int) -> torch.Tensor:\n return torch.add(torch.add(x, y, alpha=a), z, alpha=b)\n\n for test in (test_float, test_int):\n x, y, z = [torch.rand(4) for i in range(3)]\n a, b = 1, 2\n test(x, y, z, a, b)\n r = test(x, y, z, a, b)\n xn, yn, zn = [t.numpy() for t in (x, y, z)]\n np.testing.assert_allclose(r.numpy(), xn + yn * a + zn * b)\n\n def test_loop(self):\n @torch.jit.script\n def test(x: torch.Tensor, y: torch.Tensor, z: int) -> torch.Tensor:\n b = y\n for i in range(0, z):\n a = x + y\n b = b + y\n 
return b\n\n x, y, z = (torch.zeros(32, 32), torch.ones(32, 32), 4)\n test(x, y, z)\n r = test(x, y, z)\n\n def test_slice(self):\n def easy(x, y):\n a = x[0:512:2]\n b = y[0:512:2]\n return a + b\n\n traced = torch.jit.trace(easy, (torch.ones(1024, 1024), torch.zeros(1024, 1024)))\n\n a = torch.ones(1024, 1024)\n x = traced(a, a)\n npr = a[0:512:2]\n npr = npr + npr\n np.testing.assert_allclose(npr.numpy(), x.numpy())\n\n def test_unsqueeze(self, N=256):\n def easy(x, y):\n a = torch.unsqueeze(x, 0)\n b = torch.unsqueeze(y, 0)\n return a + b\n\n traced = torch.jit.trace(easy, (torch.ones(N, N), torch.zeros(N, N)))\n\n a = torch.rand(N, N)\n x = traced(a, a)\n npr = np.expand_dims(a, 0)\n npr = npr + npr\n np.testing.assert_allclose(npr, x.numpy())\n\n def _test_softmax(self, device):\n def test_softmax(x, y):\n a = F.softmax(x, dim=0, dtype=torch.float32)\n b = F.softmax(y, dim=0, dtype=torch.float32)\n c = F.softmax(x, dim=1, dtype=torch.float32)\n d = F.softmax(y, dim=1, dtype=torch.float32)\n return a + b + c + d\n\n def test_softmax_neg_index(x, y):\n a = F.softmax(x, dim=-2, dtype=torch.float32)\n b = F.softmax(y, dim=-2, dtype=torch.float32)\n c = F.softmax(x, dim=-1, dtype=torch.float32)\n d = F.softmax(y, dim=-1, dtype=torch.float32)\n return a + b + c + d\n\n def test_log_softmax(x, y):\n a = F.log_softmax(x, dim=0, dtype=torch.float32)\n b = F.log_softmax(y, dim=0, dtype=torch.float32)\n c = F.log_softmax(x, dim=1, dtype=torch.float32)\n d = F.log_softmax(y, dim=1, dtype=torch.float32)\n return a + b + c + d\n\n for test in (test_softmax, test_log_softmax, test_softmax_neg_index):\n old = torch._C._jit_set_texpr_reductions_enabled(True)\n traced = torch.jit.trace(test, (torch.randn(2, 3, device=device), torch.randn(2, 3, device=device)))\n inp = torch.randn(2, 3, device=device)\n res = traced(inp, inp)\n # Use eager mode as reference.\n ref = test(inp, inp)\n np.testing.assert_allclose(ref, res.cpu().numpy(), rtol=1e-06, atol=1e-06)\n torch._C._jit_set_texpr_reductions_enabled(old)\n\n def test_softmax_cpu(self):\n self._test_softmax('cpu')\n\n @unittest.skipIf(not torch.cuda.is_available(), \"requires CUDA\")\n @unittest.skip(\"global allocs are not supported yet.\")\n def test_softmax_cuda(self):\n self._test_softmax('cuda')\n\n def test_half_gelu(self):\n devices = [\"cuda\"] if torch.cuda.is_available() else []\n\n @torch.jit.script\n def bias_gelu(bias, y):\n x = bias + y\n return x * 0.5 * (1.0 + torch.erf(x / 1.41421))\n\n for device in devices:\n a = torch.rand(1024, dtype=torch.half, device=device)\n b = torch.rand(1024, dtype=torch.half, device=device)\n traced = torch.jit.trace(bias_gelu, (a, b))\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n\n def test_half_bn_relu(self):\n devices = [\"cuda\"] if torch.cuda.is_available() else []\n\n def foo(a, b, c):\n y = torch.nn.functional.batch_norm(a, b, c)\n z = y.relu()\n return z\n\n for device in devices:\n a = torch.rand(16, 16, dtype=torch.half, device=device)\n b = torch.rand(16, dtype=torch.half, device=device)\n c = torch.rand(16, dtype=torch.half, device=device)\n traced = torch.jit.trace(foo, (a, b, c))\n print(traced.graph)\n x = warmup_and_run_forward(traced, a, b, c)\n self.assertLastGraphAllFused()\n\n def test_exp_pow(self):\n @torch.jit.script\n def do_exp(x, y, z):\n return ((x * y) * 2) * torch.pow(z, 2)\n\n for device in self.devices:\n x = torch.rand(10, dtype=torch.double, device=device)\n y = torch.rand(10, dtype=torch.double, device=device)\n z = torch.rand(10, 
dtype=torch.double, device=device)\n traced = torch.jit.trace(do_exp, (x, y, z))\n x = warmup_and_run_forward(traced, x, y, z)\n self.assertLastGraphAllFused()\n\n def test_transpose(self):\n @torch.jit.script\n def test(x, y, z):\n return x.transpose(0, 1) + y + z\n x = torch.rand(4, 5, 2, 3)\n y = torch.rand(5, 4, 2, 3)\n z = torch.rand(5, 4, 2, 3)\n ref = test(x, y, z)\n res = test(x, y, z)\n np.testing.assert_allclose(ref.numpy(), res.numpy())\n\n def test_sliced_stride(self):\n @torch.jit.script\n def test(x, y, z):\n return x + y + z\n x = torch.rand(16, 4, 2, 3)[::2]\n y = torch.rand(8, 4, 2, 3)\n z = torch.rand(8, 4, 2, 3)\n ref = test(x, y, z)\n res = test(x, y, z)\n np.testing.assert_allclose(ref.numpy(), res.numpy())\n\n @unittest.skip(\"dynamic shapes are not quite there yet\")\n @unittest.skipIf(not torch.cuda.is_available(), \"requires CUDA\")\n def test_dynamic_shape(self):\n with num_profiled_runs(2):\n @torch.jit.script\n def test(x, y, z):\n return x * y * z\n x, y, z = [torch.rand(4, 8).cuda() for _ in range(3)]\n ref = test(x, y, z)\n _ = test(*[torch.rand(6, 8).cuda() for _ in range(3)])\n res = test(x, y, z)\n np.testing.assert_allclose(ref.cpu().numpy(), res.cpu().numpy())\n\n # A wild broadcast appears.\n x = torch.rand(4, 8).cuda()\n y = torch.rand(1, 8).cuda()\n z = torch.rand(4, 1).cuda()\n res = test(x, y, z)\n xn, yn, zn = [t.cpu().numpy() for t in (x, y, z)]\n np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn)\n\n # Mismatched shapes shouldn't reach codegen.\n x = torch.rand(4, 8).cuda()\n y = torch.rand(4, 8).cuda()\n z = torch.rand(5, 8).cuda()\n try:\n res = test(x, y, z)\n except RuntimeError as e:\n assert \"The size of tensor a (4) must match\" in e.args[0]\n\n # Changing a static dimension fails guards.\n # x, y, z = [torch.rand(4, 7).cuda() for _ in range(3)]\n # xn, yn, zn = [t.cpu().numpy() for t in (x, y, z)]\n # res = test(x, y, z)\n # print(test.graph_for(x, y, z))\n # np.testing.assert_allclose(res.cpu().numpy(), xn * yn * zn)\n\n @unittest.skipIf(not torch.cuda.is_available(), \"requires CUDA\")\n def test_guard_fails(self):\n @torch.jit.script\n def test(x, y, z):\n return x * y * z\n r1 = test(*[torch.rand(4).cuda() for _ in range(3)])\n r2 = test(*[torch.rand(4).cuda() for _ in range(3)])\n r3 = test(*[torch.rand(4).cuda() for _ in range(3)])\n r4 = test(*[torch.rand(7).cuda() for _ in range(3)])\n\n def test_bitwise_ops(self):\n def run_and(x, y):\n return x & (x & y)\n\n def run_or(x, y):\n return x & (x | y)\n\n def run_xor(x, y):\n return x ^ (x ^ y)\n\n def run_lshift(x, y):\n return x & (x << y)\n\n def run_rshift(x, y):\n return x & (x >> y)\n\n fns = {run_and, run_or, run_xor, run_lshift, run_rshift}\n\n for device in self.devices:\n for fn in fns:\n a = torch.ones(128, dtype=torch.int32, device=device)\n b = torch.zeros(128, dtype=torch.int32, device=device)\n inp = torch.ones(128, dtype=torch.int32, device=device)\n traced = torch.jit.trace(fn, (inp, inp))\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n y = fn(a, b)\n np.testing.assert_allclose(x.cpu().numpy(), y.cpu().numpy())\n\n def test_where(self):\n def run_where(x, y):\n return torch.where(torch.gt(x, y), x, y)\n\n a = torch.rand(1024, dtype=float)\n b = torch.rand(1024, dtype=float)\n traced = torch.jit.trace(run_where, (torch.zeros(1024), torch.zeros(1024)))\n x = warmup_and_run_forward(traced, a, b)\n self.assertLastGraphAllFused()\n y = run_where(a, b)\n np.testing.assert_allclose(x.numpy(), y.numpy())\n\n def 
test_multi_rand(self):\n for device in self.devices:\n def test(x):\n y = torch.rand_like(x)\n return (x + y) - (y - x)\n a = torch.rand(4, device=device)\n scripted = torch.jit.script(test)\n out = warmup_and_run_forward(scripted, a)\n self.assertLastGraphAllFused()\n assert torch.allclose(out, 2 * a)\n\n def test_mask(self):\n def test(x):\n return x.unsqueeze(1) == 0\n\n for d in self.devices:\n x = torch.rand(4, device=d) > 0.5\n scripted = torch.jit.script(test)\n out = warmup_and_run_forward(scripted, x)\n self.assertLastGraphAllFused()\n assert torch.equal(out, test(x))\n\n def test_simple_add(self):\n val = torch._C._jit_get_te_generate_block_code()\n torch._C._jit_set_te_generate_block_code(True)\n fall_bk = torch._C._jit_texpr_fallback_allowed()\n torch._C._jit_texpr_set_fallback_allowed(True)\n\n def simple(a, b):\n return torch.add(a, b)\n\n a = torch.ones(256, 256)\n b = torch.ones(256, 256)\n traced = torch.jit.trace(simple,\n (torch.ones(256, 256), torch.ones(256, 256)))\n f = traced(a, b)\n f_test = np.full((256, 256), 2, dtype=float)\n np.testing.assert_allclose(f.numpy(), f_test)\n torch._C._jit_set_te_generate_block_code(val)\n torch._C._jit_texpr_set_fallback_allowed(fall_bk)\n\n def test_strided_output_preserved(self):\n def foo(a, b):\n return a + b - a\n\n # smaller, easier to debug example\n x = torch.arange(6)\n x = torch.as_strided(x, (2, 3), (1, 2))\n total = 0\n for i in range(2):\n for j in range(3):\n x[i, j] = total\n total += 1\n foo_script = torch.jit.script(foo)\n foo_script(x, x)\n foo_script(x, x)\n out_s = foo_script(x, x)\n out_eager = foo(x, x)\n self.assertEqual(out_s, out_eager)\n self.assertEqual(out_s.stride(), out_eager.stride())\n self.assertLastGraphAllFused()\n\n # more dims\n N, C, H, W, = 2, 3, 4, 5\n x = torch.rand(N, C, H, W).to(memory_format=torch.channels_last)\n foo_script = torch.jit.script(foo)\n foo_script(x, x)\n foo_script(x, x)\n out_s = foo_script(x, x)\n out_eager = foo(x, x)\n self.assertEqual(out_s, out_eager)\n self.assertEqual(out_s.stride(), out_eager.stride())\n self.assertLastGraphAllFused()\n\n def test_alias_analysis_module(self):\n class AliasModule(nn.Module):\n def __init__(self):\n super(AliasModule, self).__init__()\n torch.manual_seed(1337)\n self.a = torch.randn(128, 128)\n self.b = torch.randn(128, 128)\n self.c = torch.randn(128, 128)\n\n def forward(self, x, y, z):\n z = z + self.a\n self.b.add_(y)\n w = z + self.a\n z = w + x\n return z\n x = torch.randn(128, 128)\n\n def getModule(script):\n am = AliasModule()\n if script:\n return torch.jit.script(am)\n return am\n\n am = getModule(False)\n am_s = getModule(True)\n ref = am(x, x, x)\n test = am_s(x, x, x)\n torch.testing.assert_close(ref, test)\n\n # Now do the aliasing\n am.a = am.b\n ref = am(x, x, x)\n\n am_s.a = am_s.b\n test = am_s(x, x, x)\n\n torch.testing.assert_close(ref, test)\n\n def test_alias_analysis_inputs(self):\n class AliasModule(nn.Module):\n def __init__(self):\n super(AliasModule, self).__init__()\n torch.manual_seed(1337)\n self.a = torch.randn(128, 128)\n self.b = torch.randn(128, 128)\n self.c = torch.randn(128, 128)\n\n def forward(self, x, y, z):\n x.add_(y)\n w = z + self.a\n z = w + x\n return z\n\n def getModule(script):\n am = AliasModule()\n if script:\n return torch.jit.script(am)\n return am\n am = getModule(False)\n am_s = getModule(True)\n\n torch.manual_seed(1337)\n x = torch.randn(128, 128)\n ref = am(x, x, x)\n\n torch.manual_seed(1337)\n x = torch.randn(128, 128)\n test = am_s(x, x, x)\n\n 
torch.testing.assert_close(ref, test)\n\n def test_alias_analysis_input_and_module(self):\n class AliasModule(nn.Module):\n def __init__(self):\n super(AliasModule, self).__init__()\n torch.manual_seed(1337)\n self.a = torch.randn(128, 128)\n self.b = torch.randn(128, 128)\n self.c = torch.randn(128, 128)\n\n def forward(self, x, y, z):\n x.add_(y)\n w = z + self.b\n z = w + x\n return z\n\n def getModule(script):\n am = AliasModule()\n if script:\n return torch.jit.script(am)\n return am\n am = getModule(False)\n am_s = getModule(True)\n\n torch.manual_seed(1337)\n x = torch.randn(128, 128)\n am.b = x\n ref = am(x, x, x)\n\n torch.manual_seed(1337)\n x = torch.randn(128, 128)\n am_s.b = x\n test = am_s(x, x, x)\n\n torch.testing.assert_close(ref, test)\n\n def test_multiple_outputs(self):\n for device in self.devices:\n # A bug reported internally similar to the one reported in #48533\n def foo(a, b, c):\n t_next = c + 1\n t5 = t_next * b\n t6 = torch.unsqueeze(t_next, 1)\n t7 = a * t6\n return (t7, t5, t_next)\n\n a = torch.rand(20, 20, dtype=torch.float32, device=device)\n b = torch.rand(20 * 29, dtype=torch.float32, device=device).as_strided([20], [29])\n c = torch.ones(20, dtype=torch.int64, device=device)\n traced = torch.jit.trace(foo, (a, b, c))\n ref = foo(a, b, c)\n exp = traced(a, b, c)\n exp = traced(a, b, c)\n self.assertEqual(ref, exp)\n\nif __name__ == '__main__':\n run_tests()\n",
"\nimport abc\nimport copy\nfrom collections import defaultdict\nfrom typing import Dict\n\nimport torch\nfrom torch import nn\nfrom torch.nn.utils import parametrize\n\nfrom .utils import FakeSparsity, module_to_fqn, fqn_to_module\n\nSUPPORTED_MODULES = {\n nn.Linear\n}\n\n\nclass BaseSparsifier(abc.ABC):\n r\"\"\"Base class for all sparsifiers.\n\n Abstract methods that need to be implemented:\n\n - update_mask: Function to compute a new mask for all keys in the\n `module_groups`.\n\n Args:\n - model [nn.Module]: model to configure. The model itself is not saved\n but used for the state_dict saving / loading.\n - config [list]: configuration elements could either be instances of\n nn.Module or dict maps. The dicts must have a key 'module' with the\n value being an instance of a nn.Module.\n - defaults [dict]: default configurations will be attached to the\n configuration. Only the keys that don't exist in the `config` will\n be updated.\n\n Example::\n\n >>> config = [model.layer1, {'module': model.linear2, 'sparsity_level': 0.5}]\n >>> defaults = {'sparsity_level': 0.7}\n >>> # model.layer1 will have `sparsity_level` = 0.7 (getting default)\n >>> sparsifier = BaseSparsifier(config, defaults)\n \"\"\"\n def __init__(self, defaults):\n super().__init__()\n self.defaults = defaults\n if self.defaults is None:\n self.defaults = dict()\n\n self.state: Dict[str, Dict] = defaultdict(dict)\n self.module_groups = []\n self.enable_mask_update = True\n\n def __getstate__(self):\n return {\n 'defaults': self.defaults,\n 'state': self.state,\n 'module_groups': self.module_groups,\n }\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + ' ('\n for i, sparse_args in enumerate(self.module_groups):\n module = sparse_args['module']\n format_string += '\\n'\n format_string += f'\\tModule Group {i}\\n'\n format_string += f'\\t module: {module}\\n'\n for key in sorted(sparse_args.keys()):\n if key == 'module':\n continue\n format_string += f'\\t {key}: {sparse_args[key]}\\n'\n format_string += ')'\n return format_string\n\n def state_dict(self):\n r\"\"\"Returns the state of the optimizer as a :class:`dict`.\n\n It contains:\n * state - current state of the sparsification.\n * module_groups - a list containing all sparsity configuration groups\n with the key 'fqn' specifying the layer path within a model\n\n TODO: Need a clean way of loading the state of the \"preapred\" module\n \"\"\"\n module_groups = [\n dict(filter(lambda key_value: key_value[0] != 'module', mg.items()))\n for mg in self.module_groups\n ]\n\n return {\n 'state': self.state,\n 'module_groups': module_groups,\n }\n\n def load_state_dict(self, state_dict, strict=True):\n module_groups = copy.deepcopy(state_dict['module_groups'])\n states = state_dict['state']\n for fqn, s in states.items():\n layer = fqn_to_module(self.model, fqn)\n if strict and layer is None:\n raise RuntimeError(f'Error loading {fqn} into the model')\n\n found = False\n for p in layer.parametrizations['weight']:\n if isinstance(p, FakeSparsity):\n found = True\n break\n if not found:\n p = FakeSparsity(torch.ones(layer.weight.shape))\n parametrize.register_parametrization(layer, 'weight', p)\n if s.get('mask', None) is not None:\n mask = s.pop('mask')\n p.mask = mask\n\n for mg in module_groups:\n if mg['fqn'] == fqn:\n mg['module'] = layer\n self.__setstate__({'state': states, 'module_groups': module_groups})\n\n def prepare(self, model, config):\n r\"\"\"Prepares a model, by adding the 
parametrizations.\n\n Note::\n\n The model is modified inplace. If you need to preserve the original\n model, use copy.deepcopy.\n \"\"\"\n self.model = model # TODO: Need to figure out how to load without this.\n self.config = config\n # If no config -- try getting all the supported layers\n if self.config is None:\n # Add all models to the config\n self.config = []\n stack = [model]\n while stack:\n module = stack.pop()\n for name, child in module.named_children():\n if type(child) in SUPPORTED_MODULES:\n self.config.append(child)\n else:\n stack.append(child)\n\n # TODO: Remove the configuration by reference ('module')\n for module_config in self.config:\n if isinstance(module_config, nn.Module):\n module_config = {'module': module_config}\n local_args = copy.deepcopy(self.defaults)\n local_args.update(module_config)\n # Make sure there is at least one way of handling the model\n module = local_args.get('module', None)\n module_fqn = local_args.get('fqn', None)\n if module is None and module_fqn is None:\n # No module given for this group\n raise ValueError('Either `module` or `fqn` must be specified!')\n elif module is None:\n # FQN is given\n module = fqn_to_module(model, module_fqn)\n elif module_fqn is None:\n # Module is given\n module_fqn = module_to_fqn(model, module)\n else:\n # Both Module and FQN are given\n module_from_fqn = fqn_to_module(model, module_fqn)\n assert module is module_from_fqn, \\\n 'Given both `module` and `fqn`, it is expected them to ' \\\n 'refer to the same thing!'\n if module_fqn and module_fqn[0] == '.':\n module_fqn = module_fqn[1:]\n local_args['fqn'] = module_fqn\n local_args['module'] = module\n self.module_groups.append(local_args)\n\n self._prepare()\n\n def _prepare(self, *args, **kwargs):\n r\"\"\"Adds mask parametrization to the layer weight\n \"\"\"\n for config in self.module_groups:\n module = config['module']\n param = config.get('parametrization', FakeSparsity)\n mask = config.get('mask', torch.ones_like(module.weight))\n self.state[config['fqn']]['mask'] = mask\n parametrize.register_parametrization(module, 'weight', param(mask))\n\n def squash_mask(self, *args, **kwargs):\n for config in self.module_groups:\n module = config['module']\n parametrize.remove_parametrizations(module, 'weight',\n leave_parametrized=True)\n\n def convert(self):\n # TODO: Call the torch.ao.utils.convert in here\n raise NotImplementedError('`convert` is not implemented. Please, use '\n '`torch.ao.utils.convert` instead.')\n\n def step(self, use_path=True):\n if not self.enable_mask_update:\n return\n with torch.no_grad():\n for config in self.module_groups:\n module = config['module']\n self.update_mask(module, **config)\n\n @abc.abstractmethod\n def update_mask(self, layer, **kwargs):\n pass\n",
"#!/usr/bin/env python3\n# Owner(s): [\"oncall: mobile\"]\n\nimport io\nimport textwrap\nfrom typing import List, Optional, Dict\n\nimport torch\nimport torch.utils.bundled_inputs\nfrom torch.testing._internal.common_utils import TestCase, run_tests\n\n\ndef model_size(sm):\n buffer = io.BytesIO()\n torch.jit.save(sm, buffer)\n return len(buffer.getvalue())\n\n\ndef save_and_load(sm):\n buffer = io.BytesIO()\n torch.jit.save(sm, buffer)\n buffer.seek(0)\n return torch.jit.load(buffer)\n\n\nclass TestBundledInputs(TestCase):\n\n def test_single_tensors(self):\n class SingleTensorModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n\n sm = torch.jit.script(SingleTensorModel())\n original_size = model_size(sm)\n get_expr : List[str] = []\n samples = [\n # Tensor with small numel and small storage.\n (torch.tensor([1]),),\n # Tensor with large numel and small storage.\n (torch.tensor([[2, 3, 4]]).expand(1 << 16, -1)[:, ::2],),\n # Tensor with small numel and large storage.\n (torch.tensor(range(1 << 16))[-8:],),\n # Large zero tensor.\n (torch.zeros(1 << 16),),\n # Large channels-last ones tensor.\n (torch.ones(4, 8, 32, 32).contiguous(memory_format=torch.channels_last),),\n # Special encoding of random tensor.\n (torch.utils.bundled_inputs.bundle_randn(1 << 16),),\n # Quantized uniform tensor.\n (torch.quantize_per_tensor(torch.zeros(4, 8, 32, 32), 1, 0, torch.qint8),),\n ]\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n sm, samples, get_expr)\n # print(get_expr[0])\n # print(sm._generate_bundled_inputs.code)\n\n # Make sure the model only grew a little bit,\n # despite having nominally large bundled inputs.\n augmented_size = model_size(sm)\n self.assertLess(augmented_size, original_size + (1 << 12))\n\n loaded = save_and_load(sm)\n inflated = loaded.get_all_bundled_inputs()\n self.assertEqual(loaded.get_num_bundled_inputs(), len(samples))\n self.assertEqual(len(inflated), len(samples))\n self.assertTrue(loaded(*inflated[0]) is inflated[0][0])\n\n for idx, inp in enumerate(inflated):\n self.assertIsInstance(inp, tuple)\n self.assertEqual(len(inp), 1)\n self.assertIsInstance(inp[0], torch.Tensor)\n if idx != 5:\n # Strides might be important for benchmarking.\n self.assertEqual(inp[0].stride(), samples[idx][0].stride())\n self.assertEqual(inp[0], samples[idx][0], exact_dtype=True)\n\n # This tensor is random, but with 100,000 trials,\n # mean and std had ranges of (-0.0154, 0.0144) and (0.9907, 1.0105).\n self.assertEqual(inflated[5][0].shape, (1 << 16,))\n self.assertEqual(inflated[5][0].mean().item(), 0, atol=0.025, rtol=0)\n self.assertEqual(inflated[5][0].std().item(), 1, atol=0.02, rtol=0)\n\n\n def test_large_tensor_with_inflation(self):\n class SingleTensorModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n sm = torch.jit.script(SingleTensorModel())\n sample_tensor = torch.randn(1 << 16)\n # We can store tensors with custom inflation functions regardless\n # of size, even if inflation is just the identity.\n sample = torch.utils.bundled_inputs.bundle_large_tensor(sample_tensor)\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n sm, [(sample,)])\n\n loaded = save_and_load(sm)\n inflated = loaded.get_all_bundled_inputs()\n self.assertEqual(len(inflated), 1)\n\n self.assertEqual(inflated[0][0], sample_tensor)\n\n\n def test_rejected_tensors(self):\n def check_tensor(sample):\n # Need to define the class in this scope to get a fresh type for each run.\n class SingleTensorModel(torch.nn.Module):\n def forward(self, arg):\n return 
arg\n sm = torch.jit.script(SingleTensorModel())\n with self.assertRaisesRegex(Exception, \"Bundled input argument\"):\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n sm, [(sample,)])\n\n # Plain old big tensor.\n check_tensor(torch.randn(1 << 16))\n # This tensor has two elements, but they're far apart in memory.\n # We currently cannot represent this compactly while preserving\n # the strides.\n small_sparse = torch.randn(2, 1 << 16)[:, 0:1]\n self.assertEqual(small_sparse.numel(), 2)\n check_tensor(small_sparse)\n\n\n def test_non_tensors(self):\n class StringAndIntModel(torch.nn.Module):\n def forward(self, fmt: str, num: int):\n return fmt.format(num)\n\n sm = torch.jit.script(StringAndIntModel())\n samples = [\n (\"first {}\", 1),\n (\"second {}\", 2),\n ]\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n sm, samples)\n\n loaded = save_and_load(sm)\n inflated = loaded.get_all_bundled_inputs()\n self.assertEqual(inflated, samples)\n self.assertTrue(loaded(*inflated[0]) == \"first 1\")\n\n def test_multiple_methods_with_inputs(self):\n class MultipleMethodModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n\n @torch.jit.export\n def foo(self, arg):\n return arg\n\n mm = torch.jit.script(MultipleMethodModel())\n samples = [\n # Tensor with small numel and small storage.\n (torch.tensor([1]),),\n # Tensor with large numel and small storage.\n (torch.tensor([[2, 3, 4]]).expand(1 << 16, -1)[:, ::2],),\n # Tensor with small numel and large storage.\n (torch.tensor(range(1 << 16))[-8:],),\n # Large zero tensor.\n (torch.zeros(1 << 16),),\n # Large channels-last ones tensor.\n (torch.ones(4, 8, 32, 32).contiguous(memory_format=torch.channels_last),),\n ]\n info = [\n 'Tensor with small numel and small storage.',\n 'Tensor with large numel and small storage.',\n 'Tensor with small numel and large storage.',\n 'Large zero tensor.',\n 'Large channels-last ones tensor.',\n 'Special encoding of random tensor.',\n ]\n torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs(\n mm,\n inputs={\n mm.forward : samples,\n mm.foo : samples\n },\n info={\n mm.forward : info,\n mm.foo : info\n }\n )\n loaded = save_and_load(mm)\n inflated = loaded.get_all_bundled_inputs()\n\n # Make sure these functions are all consistent.\n self.assertEqual(inflated, samples)\n self.assertEqual(inflated, loaded.get_all_bundled_inputs_for_forward())\n self.assertEqual(inflated, loaded.get_all_bundled_inputs_for_foo())\n\n # Check running and size helpers\n self.assertTrue(loaded(*inflated[0]) is inflated[0][0])\n self.assertEqual(loaded.get_num_bundled_inputs(), len(samples))\n\n # Check helper that work on all functions\n all_info = loaded.get_bundled_inputs_functions_and_info()\n self.assertEqual(set(all_info.keys()), set(['forward', 'foo']))\n self.assertEqual(all_info['forward']['get_inputs_function_name'], ['get_all_bundled_inputs_for_forward'])\n self.assertEqual(all_info['foo']['get_inputs_function_name'], ['get_all_bundled_inputs_for_foo'])\n self.assertEqual(all_info['forward']['info'], info)\n self.assertEqual(all_info['foo']['info'], info)\n\n # example of how to turn the 'get_inputs_function_name' into the actual list of bundled inputs\n for func_name in all_info.keys():\n input_func_name = all_info[func_name]['get_inputs_function_name'][0]\n func_to_run = getattr(loaded, input_func_name)\n self.assertEqual(func_to_run(), samples)\n\n def test_multiple_methods_with_inputs_both_defined_failure(self):\n class MultipleMethodModel(torch.nn.Module):\n 
def forward(self, arg):\n return arg\n\n @torch.jit.export\n def foo(self, arg):\n return arg\n\n samples = [(torch.tensor([1]),)]\n\n # inputs defined 2 ways so should fail\n with self.assertRaises(Exception):\n mm = torch.jit.script(MultipleMethodModel())\n definition = textwrap.dedent(\"\"\"\n def _generate_bundled_inputs_for_forward(self):\n return []\n \"\"\")\n mm.define(definition)\n torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs(\n mm,\n inputs={\n mm.forward : samples,\n mm.foo : samples,\n },\n )\n\n def test_multiple_methods_with_inputs_neither_defined_failure(self):\n class MultipleMethodModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n\n @torch.jit.export\n def foo(self, arg):\n return arg\n\n samples = [(torch.tensor([1]),)]\n\n # inputs not defined so should fail\n with self.assertRaises(Exception):\n mm = torch.jit.script(MultipleMethodModel())\n mm._generate_bundled_inputs_for_forward()\n torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs(\n mm,\n inputs={\n mm.forward : None,\n mm.foo : samples,\n },\n )\n\n def test_bad_inputs(self):\n class SingleTensorModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n\n # Non list for input list\n with self.assertRaises(TypeError):\n m = torch.jit.script(SingleTensorModel())\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n m,\n inputs=\"foo\" # type: ignore[arg-type]\n )\n\n # List of non tuples. Most common error using the api.\n with self.assertRaises(TypeError):\n m = torch.jit.script(SingleTensorModel())\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n m,\n inputs=[torch.ones(1, 2), ] # type: ignore[list-item]\n )\n\n def test_double_augment_fail(self):\n class SingleTensorModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n\n m = torch.jit.script(SingleTensorModel())\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n m,\n inputs=[(torch.ones(1),)]\n )\n with self.assertRaisesRegex(Exception, \"Models can only be augmented with bundled inputs once.\"):\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n m,\n inputs=[(torch.ones(1),)]\n )\n\n def test_double_augment_non_mutator(self):\n class SingleTensorModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n\n m = torch.jit.script(SingleTensorModel())\n bundled_model = torch.utils.bundled_inputs.bundle_inputs(\n m,\n inputs=[(torch.ones(1),)]\n )\n with self.assertRaises(AttributeError):\n m.get_all_bundled_inputs()\n self.assertEqual(bundled_model.get_all_bundled_inputs(), [(torch.ones(1),)])\n self.assertEqual(bundled_model.forward(torch.ones(1)), torch.ones(1))\n\n def test_double_augment_success(self):\n class SingleTensorModel(torch.nn.Module):\n def forward(self, arg):\n return arg\n\n m = torch.jit.script(SingleTensorModel())\n bundled_model = torch.utils.bundled_inputs.bundle_inputs(\n m,\n inputs={m.forward : [(torch.ones(1),)]}\n )\n self.assertEqual(bundled_model.get_all_bundled_inputs(), [(torch.ones(1),)])\n\n bundled_model2 = torch.utils.bundled_inputs.bundle_inputs(\n bundled_model,\n inputs=[(torch.ones(2),)]\n )\n self.assertEqual(bundled_model2.get_all_bundled_inputs(), [(torch.ones(2),)])\n\n\n def test_dict_args(self):\n class MyModel(torch.nn.Module):\n def forward(\n self,\n arg1: Optional[Dict[str, torch.Tensor]],\n arg2: Optional[List[torch.Tensor]],\n arg3: torch.Tensor,\n ):\n if arg1 is None:\n return arg3\n elif arg2 is None:\n return arg1[\"a\"] + arg1[\"b\"]\n else:\n return arg1[\"a\"] + 
arg1[\"b\"] + arg2[0]\n\n small_sample = dict(\n a=torch.zeros([10, 20]),\n b=torch.zeros([1, 1]),\n c=torch.zeros([10, 20]),\n )\n small_list = [torch.zeros([10, 20])]\n\n big_sample = dict(\n a=torch.zeros([1 << 5, 1 << 8, 1 << 10]),\n b=torch.zeros([1 << 5, 1 << 8, 1 << 10]),\n c=torch.zeros([1 << 5, 1 << 8, 1 << 10]),\n )\n big_list = [torch.zeros([1 << 5, 1 << 8, 1 << 10])]\n\n def condensed(t):\n ret = torch.empty_like(t).flatten()[0].clone().expand(t.shape)\n assert ret.storage().size() == 1\n # ret.storage()[0] = 0\n return ret\n\n def bundle_optional_dict_of_randn(template):\n return torch.utils.bundled_inputs.InflatableArg(\n value=(\n None\n if template is None\n else {k: condensed(v) for (k, v) in template.items()}\n ),\n fmt=\"{}\",\n fmt_fn=\"\"\"\n def {}(self, value: Optional[Dict[str, Tensor]]):\n if value is None:\n return None\n output = {{}}\n for k, v in value.items():\n output[k] = torch.randn_like(v)\n return output\n \"\"\",\n )\n\n def bundle_optional_list_of_randn(template):\n return torch.utils.bundled_inputs.InflatableArg(\n value=(None if template is None else [condensed(v) for v in template]),\n fmt=\"{}\",\n fmt_fn=\"\"\"\n def {}(self, value: Optional[List[Tensor]]):\n if value is None:\n return None\n output = []\n for v in value:\n output.append(torch.randn_like(v))\n return output\n \"\"\",\n )\n\n out : List[str] = []\n sm = torch.jit.script(MyModel())\n original_size = model_size(sm)\n small_inputs = (\n bundle_optional_dict_of_randn(small_sample),\n bundle_optional_list_of_randn(small_list),\n torch.zeros([3, 4]),\n )\n big_inputs = (\n bundle_optional_dict_of_randn(big_sample),\n bundle_optional_list_of_randn(big_list),\n torch.zeros([1 << 5, 1 << 8, 1 << 10]),\n )\n\n torch.utils.bundled_inputs.augment_model_with_bundled_inputs(\n sm,\n [\n big_inputs,\n small_inputs,\n ],\n _receive_inflate_expr=out,\n )\n augmented_size = model_size(sm)\n # assert the size has not increased more than 8KB\n self.assertLess(augmented_size, original_size + (1 << 13))\n\n loaded = save_and_load(sm)\n inflated = loaded.get_all_bundled_inputs()\n self.assertEqual(len(inflated[0]), len(small_inputs))\n\n methods, _ = torch.utils.bundled_inputs._get_bundled_inputs_attributes_and_methods(\n loaded\n )\n\n # One Function (forward)\n # two bundled inputs (big_inputs and small_inputs)\n # two args which have InflatableArg with fmt_fn\n # 1 * 2 * 2 = 4\n self.assertEqual(\n sum([method.startswith(\"_inflate_helper\") for method in methods]), 4\n )\n\n\nif __name__ == '__main__':\n run_tests()\n",
"import torch\n\nfrom ._dbr.auto_trace import add_auto_observation, add_auto_convert\nfrom ._dbr.fusion import get_module_fusion_fqns\n\n\ndef prepare(model, example_inputs, inplace=False, allow_list=None,\n observer_non_leaf_module_list=None,\n prepare_custom_config_dict=None,\n fuse_modules=True):\n r\"\"\"A wrapper around `torch.quantization.prepare` which prepares the\n model for quantization using dynamic tracing. Requires `example_inputs` to build\n the graph before calibration or quantization aware training can proceed.\n\n TODO(future PR): better docblock\n \"\"\"\n assert example_inputs is not None, 'example_inputs must be specified'\n\n if fuse_modules:\n # automatically fuse modules\n old_class = model.__class__\n # For now, need to propagate qconfig before observing, because\n # AutoQuantizationState needs a qconfig to work\n torch.quantization.propagate_qconfig_(model)\n model = add_auto_observation(model, example_inputs)\n module_fusion_fqns = get_module_fusion_fqns(model)\n if len(module_fusion_fqns):\n model = torch.quantization.fuse_modules(model, module_fusion_fqns)\n # TODO: also delete _auto_quant_staate of all children\n if hasattr(model, '_auto_quant_state'):\n del model._auto_quant_state\n model.__class__ = old_class\n\n # Automatically assign qconfigs for modules where the defaults do not\n # work.\n # TODO(future PR): clean this up and align with other APIs\n for name, child in model.named_modules():\n if isinstance(child, (torch.nn.Embedding, torch.nn.EmbeddingBag)):\n # pass\n # child.qconfig = torch.quantization.float_qparams_weight_only_qconfig\n # uncomment below to unbreak attention_is_all_you_need\n # TODO write up issue, maybe fix\n child.qconfig = None # type: ignore[assignment]\n elif isinstance(child, torch.nn.LSTM):\n # TODO: fix LSTM handling in eager mode static quant and remove this\n child.qconfig = None\n\n model = torch.quantization.prepare(\n model, inplace, allow_list, observer_non_leaf_module_list,\n prepare_custom_config_dict)\n assert not inplace\n model = add_auto_observation(model, example_inputs)\n return model\n\n\ndef convert(\n module, mapping=None, inplace=False, remove_qconfig=False,\n convert_custom_config_dict=None):\n r\"\"\"A wrapper around `torch.quantization.convert` which converts the model\n to a quantized form using dynamic tracing.\n\n TODO(future PR): better docblock\n \"\"\"\n # TODO: currently remove_qconfig deletes observers, two things need\n # to be fixed to enable this:\n # 1. scale/zp of non-module observers need to be saved before\n # observers are deleted\n # 2. current observer deletion logic does not know about AutoQuantState,\n # this needs to update to work the same way for modules and non-modules\n assert remove_qconfig is False\n model = torch.quantization.convert(\n module, mapping, inplace, remove_qconfig, convert_custom_config_dict)\n assert not inplace\n model = add_auto_convert(model)\n return model\n",
"# Owner(s): [\"module: typing\"]\n\nimport unittest\nfrom torch.testing._internal.common_utils import TestCase, run_tests, set_cwd\nimport tempfile\nimport torch\nimport doctest\nimport os\nimport inspect\nfrom pathlib import Path\n\ntry:\n import mypy.api\n HAVE_MYPY = True\nexcept ImportError:\n HAVE_MYPY = False\n\n\ndef get_examples_from_docstring(docstr):\n \"\"\"\n Extracts all runnable python code from the examples\n in docstrings; returns a list of lines.\n \"\"\"\n examples = doctest.DocTestParser().get_examples(docstr)\n return [f' {l}' for e in examples for l in e.source.splitlines()]\n\n\ndef get_all_examples():\n \"\"\"get_all_examples() -> str\n\n This function grabs (hopefully all) examples from the torch documentation\n strings and puts them in one nonsensical module returned as a string.\n \"\"\"\n blocklist = {\n \"_np\",\n }\n allexamples = \"\"\n\n example_file_lines = [\n \"import torch\",\n \"import torch.nn.functional as F\",\n \"import math\",\n \"import numpy\",\n \"import io\",\n \"import itertools\",\n \"\",\n # for requires_grad_ example\n # NB: We are parsing this file as Python 2, so we must use\n # Python 2 type annotation syntax\n \"def preprocess(inp):\",\n \" # type: (torch.Tensor) -> torch.Tensor\",\n \" return inp\",\n ]\n\n for fname in dir(torch):\n fn = getattr(torch, fname)\n docstr = inspect.getdoc(fn)\n if docstr and fname not in blocklist:\n e = get_examples_from_docstring(docstr)\n if e:\n example_file_lines.append(f\"\\n\\ndef example_torch_{fname}():\")\n example_file_lines += e\n\n for fname in dir(torch.Tensor):\n fn = getattr(torch.Tensor, fname)\n docstr = inspect.getdoc(fn)\n if docstr and fname not in blocklist:\n e = get_examples_from_docstring(docstr)\n if e:\n example_file_lines.append(f\"\\n\\ndef example_torch_tensor_{fname}():\")\n example_file_lines += e\n\n return \"\\n\".join(example_file_lines)\n\n\nclass TestTypeHints(TestCase):\n @unittest.skipIf(not HAVE_MYPY, \"need mypy\")\n def test_doc_examples(self):\n \"\"\"\n Run documentation examples through mypy.\n \"\"\"\n fn = Path(__file__).resolve().parent / 'generated_type_hints_smoketest.py'\n with open(fn, \"w\") as f:\n print(get_all_examples(), file=f)\n\n # OK, so here's the deal. mypy treats installed packages\n # and local modules differently: if a package is installed,\n # mypy will refuse to use modules from that package for type\n # checking unless the module explicitly says that it supports\n # type checking. (Reference:\n # https://mypy.readthedocs.io/en/latest/running_mypy.html#missing-imports\n # )\n #\n # Now, PyTorch doesn't support typechecking, and we shouldn't\n # claim that it supports typechecking (it doesn't.) However, not\n # claiming we support typechecking is bad for this test, which\n # wants to use the partial information we get from the bits of\n # PyTorch which are typed to check if it typechecks. And\n # although mypy will work directly if you are working in source,\n # some of our tests involve installing PyTorch and then running\n # its tests.\n #\n # The guidance we got from Michael Sullivan and Joshua Oreman,\n # and also independently developed by Thomas Viehmann,\n # is that we should create a fake directory and add symlinks for\n # the packages that should typecheck. So that is what we do\n # here.\n #\n # If you want to run mypy by hand, and you run from PyTorch\n # root directory, it should work fine to skip this step (since\n # mypy will preferentially pick up the local files first). 
The\n # temporary directory here is purely needed for CI. For this\n # reason, we also still drop the generated file in the test\n # source folder, for ease of inspection when there are failures.\n with tempfile.TemporaryDirectory() as tmp_dir:\n try:\n os.symlink(\n os.path.dirname(torch.__file__),\n os.path.join(tmp_dir, 'torch'),\n target_is_directory=True\n )\n except OSError:\n raise unittest.SkipTest('cannot symlink') from None\n repo_rootdir = Path(__file__).resolve().parent.parent\n # TODO: Would be better not to chdir here, this affects the\n # entire process!\n with set_cwd(str(repo_rootdir)):\n (stdout, stderr, result) = mypy.api.run([\n '--cache-dir=.mypy_cache/doc',\n '--no-strict-optional', # needed because of torch.lu_unpack, see gh-36584\n str(fn),\n ])\n if result != 0:\n self.fail(f\"mypy failed:\\n{stderr}\\n{stdout}\")\n\n\nif __name__ == '__main__':\n run_tests()\n",
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom sys import maxsize\n\nimport torch\nimport torch.onnx.symbolic_helper as sym_help\nimport warnings\n\nfrom torch.onnx.symbolic_helper import parse_args, _unimplemented, _is_tensor_list, ScalarType\nfrom torch.onnx.symbolic_opset9 import expand, unused, mul\nfrom torch.nn.modules.utils import _single, _pair, _triple\nfrom torch.onnx.utils import _add_block, _add_input_to_block, _add_output_to_block\n\n# EDITING THIS FILE? READ THIS FIRST!\n# see Note [Edit Symbolic Files] in symbolic_helper.py\n\n# This file exports ONNX ops for opset 11\n\n\n@parse_args(\"v\", \"f\", \"f\")\ndef hardtanh(g, self, min_val, max_val):\n dtype = self.type().scalarType()\n if dtype is None:\n dtype = ScalarType.FLOAT\n else:\n dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])\n min_val = g.op(\"Constant\", value_t=torch.tensor(min_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))\n max_val = g.op(\"Constant\", value_t=torch.tensor(max_val, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))\n return g.op(\"Clip\", self, min_val, max_val)\n\n\ndef clamp(g, self, min, max):\n dtype = self.type().scalarType()\n\n def _cast_if_not_none(tensor, dtype):\n if tensor is not None and not sym_help._is_none(tensor):\n return g.op(\"Cast\", tensor, to_i=sym_help.cast_pytorch_to_onnx[dtype])\n else:\n return tensor\n\n if dtype is not None:\n min = _cast_if_not_none(min, dtype)\n max = _cast_if_not_none(max, dtype)\n\n if sym_help._is_none(min):\n return clamp_max(g, self, max)\n elif sym_help._is_none(max):\n return clamp_min(g, self, min)\n else:\n if sym_help._get_tensor_rank(min) == 0 and sym_help._get_tensor_rank(max) == 0:\n return g.op(\"Clip\", self, min, max)\n else:\n return clamp_max(g, clamp_min(g, self, min), max)\n\n\n@parse_args(\"v\", \"v\")\ndef clamp_min(g, self, min):\n dtype = self.type().scalarType()\n min = g.op(\"Cast\", min, to_i=sym_help.cast_pytorch_to_onnx[dtype])\n if sym_help._get_tensor_rank(min) == 0:\n max = unused(g)\n return g.op(\"Clip\", self, min, max)\n else:\n return g.op(\"Max\", self, min)\n\n\n@parse_args(\"v\", \"v\")\ndef clamp_max(g, self, max):\n dtype = self.type().scalarType()\n max = g.op(\"Cast\", max, to_i=sym_help.cast_pytorch_to_onnx[dtype])\n if sym_help._get_tensor_rank(max) == 0:\n min = unused(g)\n return g.op(\"Clip\", self, min, max)\n else:\n return g.op(\"Min\", self, max)\n\n\ndef relu6(g, input):\n relu = g.op(\"Relu\", input)\n dtype = input.type().scalarType()\n if dtype is None:\n dtype = ScalarType.FLOAT\n else:\n dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])\n min_val = g.op(\"Constant\", value_t=torch.tensor(0, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))\n max_val = g.op(\"Constant\", value_t=torch.tensor(6, dtype=sym_help.scalar_type_to_pytorch_type[dtype]))\n return clamp(g, relu, min_val, max_val)\n\n\n# Opset 11 gather accepts negative indices\n@parse_args(\"v\", \"i\", \"v\")\ndef select(g, self, dim, index):\n return g.op(\"Gather\", self, index, axis_i=dim)\n\n\ndef index_put(g, self, indices_list_value, values, accumulate=False):\n if sym_help._is_packed_list(indices_list_value):\n indices_list = sym_help._unpack_list(indices_list_value)\n else:\n indices_list = [indices_list_value]\n if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:\n args = [self] + indices_list + [values, accumulate]\n return g.op(\"ATen\", *args, operator_s=\"index_put\")\n\n from 
torch.onnx.symbolic_opset9 import add, expand\n accumulate = sym_help._parse_arg(accumulate, \"b\")\n\n if len(indices_list) == 0:\n return values\n\n if len(indices_list) > 1:\n for idx_ in range(len(indices_list)):\n if indices_list[idx_].type().scalarType() == \"Bool\":\n indices_list[idx_] = g.op(\"NonZero\", indices_list[idx_])\n index = indices_list[0]\n\n for ind in indices_list[1:]:\n index = add(g, index, ind)\n broadcast_index_shape = g.op(\"Shape\", index)\n indices_list = [\n sym_help._unsqueeze_helper(g, expand(g, ind, broadcast_index_shape, None), [-1]) for ind in indices_list\n ]\n index = g.op(\"Concat\", *indices_list, axis_i=-1)\n else:\n # Replace index_put node with masked_scatter or masked_fill\n # when inputs to the index_put node contains a single boolean input.\n #\n # index_put -> masked_fill\n # * input index contains single tensor of Bool type (e.g.: %24 <- %23).\n # * input value contains single element (e.g.: %18).\n #\n # Torch IR\n # %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)\n # %16 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =\n # aten::to(%8, %26, %27, %11, %12, %28, %29, %15)\n # %18 : Float(requires_grad=0, device=cpu) = prim::Constant[value={1}]()\n # %23 : Bool(8, strides=[1], device=cpu) = aten::view(%16, %22)\n # %24 : Tensor?[] = prim::ListConstruct(%23)\n # %25 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) =\n # aten::index_put(%mask, %24, %18, %30)\n # return (%25)\n #\n #\n # index_put -> masked_scatter\n # * input index contains single tensor of Bool type (e.g.: %32 <- %31).\n # * input value contains multiple elements (e.g.: %28).\n #\n # Torch IR\n # %mask : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu) = aten::clone(%0, %6)\n # %28 : Float(8, strides=[1], requires_grad=0, device=cpu)\n # = prim::Constant[value= 1 1 1 1 1 1 1 1 [ CPUFloatType{8} ]]()\n # %15 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)\n # = aten::ne(%mask, %some_const)\n # %23 : Bool(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)\n # = aten::to(%15, %34, %35, %18, %19, %36, %37, %22)\n # %38 : Long(requires_grad=0, device=cpu) = prim::Constant[value={0}]()\n # %30 : int[] = prim::Constant[value=[-1]]()\n # %31 : Bool(8, strides=[1], device=cpu) = aten::view(%23, %30)\n # %32 : Tensor?[] = prim::ListConstruct(%31)\n # %33 : Float(2, 2, 2, strides=[4, 2, 1], requires_grad=0, device=cpu)\n # = aten::index_put(%mask, %32, %28, %38)\n # return (%33)\n index = indices_list[0]\n bool_inp = index\n if bool_inp.type() is not None and bool_inp.type().scalarType() == \"Bool\":\n rank = sym_help._get_tensor_rank(values)\n if rank is not None and rank == 0:\n from torch.onnx.symbolic_opset9 import masked_fill\n return masked_fill(g, self, bool_inp, values)\n return masked_scatter(g, self, bool_inp, values)\n broadcast_index_shape = g.op(\"Shape\", index)\n index = sym_help._unsqueeze_helper(g, index, [-1])\n sub_data_shape = sym_help._slice_helper(\n g, g.op(\"Shape\", self), axes=[0], starts=[len(indices_list)], ends=[maxsize])\n values_shape = g.op(\"Concat\", broadcast_index_shape, sub_data_shape, axis_i=0)\n # Check if values is a singular value and expand accordingly\n rank = sym_help._get_tensor_rank(values)\n if rank is not None and rank == 0:\n values = expand(g, values, values_shape, None)\n values = sym_help._reshape_helper(g, values, values_shape)\n\n dtype = self.type().scalarType()\n if dtype is not None and dtype != 
values.type().scalarType():\n values = g.op(\"Cast\", values, to_i=sym_help.cast_pytorch_to_onnx[dtype])\n dtype = sym_help.scalar_type_to_onnx.index(sym_help.cast_pytorch_to_onnx[dtype])\n dtype = sym_help.scalar_type_to_pytorch_type[dtype]\n\n if accumulate:\n zeros = g.op(\"ConstantOfShape\", g.op(\"Shape\", self), value_t=torch.tensor([0], dtype=dtype))\n result = g.op(\"ScatterND\", zeros, index, values)\n result = add(g, self, result)\n else:\n result = g.op(\"ScatterND\", self, index, values)\n\n return result\n\n\n@parse_args(\"v\", \"i\")\ndef pixel_shuffle(g, self, upscale_factor):\n rank = sym_help._get_tensor_rank(self)\n if rank is not None and rank != 4:\n return _unimplemented(\"pixel_shuffle\", \"only support 4d input\")\n return g.op(\"DepthToSpace\", self, blocksize_i=upscale_factor, mode_s=\"CRD\")\n\n\ndef _interpolate(name, dim, interpolate_mode):\n return sym_help._interpolate_helper(name, dim, interpolate_mode)\n\n\nupsample_nearest1d = _interpolate(\"upsample_nearest1d\", 3, \"nearest\")\nupsample_nearest2d = _interpolate(\"upsample_nearest2d\", 4, \"nearest\")\nupsample_nearest3d = _interpolate(\"upsample_nearest3d\", 5, \"nearest\")\nupsample_linear1d = _interpolate(\"upsample_linear1d\", 3, \"linear\")\nupsample_bilinear2d = _interpolate(\"upsample_bilinear2d\", 4, \"linear\")\nupsample_trilinear3d = _interpolate(\"upsample_trilinear3d\", 5, \"linear\")\nupsample_bicubic2d = _interpolate(\"upsample_bicubic2d\", 4, \"cubic\")\n\n\ndef __interpolate(g, input, size, scale_factor, mode, align_corners, recompute_scale_factor, antialias):\n return sym_help.__interpolate_helper(g, input, size, scale_factor, mode, align_corners, recompute_scale_factor)\n\n@parse_args(\"v\", \"i\", \"v\", \"v\")\ndef gather(g, self, dim, index, sparse_grad=False):\n if sym_help._maybe_get_const(sparse_grad, \"i\"):\n return _unimplemented(\"gather\", \"sparse_grad == True\")\n if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:\n return g.op(\"ATen\", self, dim, index, sparse_grad, operator_s=\"gather\")\n return g.op(\"GatherElements\", self, index, axis_i=dim)\n\n\n@parse_args(\"v\", \"i\", \"v\", \"v\")\ndef scatter(g, self, dim, index, src):\n from torch.onnx.symbolic_opset9 import expand_as\n if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:\n return g.op(\"ATen\", self, dim, index, src, operator_s=\"scatter\")\n src_type = src.type().scalarType()\n src = sym_help._maybe_get_scalar(src)\n if sym_help._is_value(src):\n return g.op(\"ScatterElements\", self, index, src, axis_i=dim)\n else:\n # Check if scalar \"src\" has same type as self (PyTorch allows different\n # type for scalar src (but not when src is tensor)). 
If not, insert Cast node.\n if self.type().scalarType() != src_type:\n src = g.op(\"Cast\", src, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])\n return g.op(\"ScatterElements\", self, index, expand_as(g, src, index), axis_i=dim)\n\n\n@parse_args(\"v\", \"i\", \"none\")\ndef cumsum(g, self, dim, dtype=None):\n dim_tensor = g.op(\"Constant\", value_t=torch.tensor(dim, dtype=torch.int))\n if dtype and dtype.node().kind() != \"prim::Constant\":\n parsed_dtype = sym_help._get_const(dtype, \"i\", \"dtype\")\n cast = g.op(\"Cast\", self, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])\n else:\n cast = self\n csum = g.op(\"CumSum\", cast, dim_tensor)\n return csum\n\n\ndef masked_select(g, self, mask):\n from torch.onnx.symbolic_opset9 import nonzero, expand_as\n index = nonzero(g, expand_as(g, mask, self))\n return g.op(\"GatherND\", self, index)\n\n\ndef masked_scatter(g, self, mask, source):\n from torch.onnx.symbolic_opset9 import nonzero, expand_as, size\n index = nonzero(g, expand_as(g, mask, self))\n # NOTE: source can have more elements than needed.\n # It could also have arbitrary shape.\n # This is not supported by ONNX::ScatterND, so we need to flatten and slice source tensor.\n source = sym_help._reshape_helper(g, source, torch.LongTensor([-1]))\n source = sym_help._slice_helper(g, source,\n axes=torch.LongTensor([0]),\n starts=torch.LongTensor([0]),\n ends=size(g, index, torch.LongTensor([0])),\n dynamic_slice=True)\n return g.op(\"ScatterND\", self, index, source)\n\n\ndef _len(g, self):\n if _is_tensor_list(self) or self.node().kind() == \"onnx::SplitToSequence\":\n return g.op(\"SequenceLength\", self)\n sz_0 = size(g, self, g.op(\"Constant\", value_t=torch.LongTensor([0])))\n return sym_help._squeeze_helper(g, sz_0, [0])\n\n\ndef __getitem_(g, self, i):\n if sym_help._is_tensor_list(self):\n # SequenceAt requires that the input be a List of Tensors\n return g.op(\"SequenceAt\", self, i)\n else:\n from torch.onnx.symbolic_opset9 import __getitem_ as getitem\n return getitem(g, self, i)\n\ndef _set_item(g, tensor_list, i, v):\n tensor_list = g.op(\"SequenceErase\", tensor_list, i)\n return g.op(\"SequenceInsert\", tensor_list, v, i)\n\ndef append(g, self, tensor):\n return g.op(\"SequenceInsert\", self, tensor)\n\n\ndef add(g, self, other, alpha=None):\n if sym_help._is_value(self) and sym_help._is_tensor_list(self):\n tensor_list_node = other.node()\n if tensor_list_node.kind() != \"prim::ListConstruct\":\n return _unimplemented(\"add\", \"does not support adding dynamic tensor list to another\")\n tensors = sym_help._unpack_list(other)\n l = self\n for t in tensors:\n l = g.op(\"SequenceInsert\", l, t)\n return l\n\n return torch.onnx.symbolic_opset9.add(g, self, other, alpha)\n\ndef insert(g, self, pos, tensor):\n return g.op(\"SequenceInsert\", self, tensor, pos)\n\n\ndef pop(g, tensor_list, dim):\n return g.op(\"SequenceErase\", tensor_list, dim)\n\ndef Delete(g, tensor_list, dim):\n return g.op(\"SequenceErase\", tensor_list, dim)\n\ndef cat(g, tensor_list, dim):\n if sym_help._is_packed_list(tensor_list):\n from torch.onnx.symbolic_opset9 import cat as cat_opset9\n return cat_opset9(g, tensor_list, dim)\n else:\n dim = sym_help._get_const(dim, \"i\", \"dim\")\n return g.op(\"ConcatFromSequence\", tensor_list, axis_i=dim)\n\n\ndef stack(g, tensor_list, dim):\n if sym_help._is_packed_list(tensor_list):\n from torch.onnx.symbolic_opset9 import stack as stack_opset9\n return stack_opset9(g, tensor_list, dim)\n else:\n dim = sym_help._get_const(dim, \"i\", 
\"dim\")\n return g.op(\"ConcatFromSequence\", tensor_list, axis_i=dim, new_axis_i=1)\n\n\n@parse_args(\"v\", \"i\", \"i\", \"i\")\ndef _unique2(g, self, sorted, return_inverse, return_counts):\n u, indices, inverse_indices, counts = g.op(\"Unique\", self, sorted_i=sorted, outputs=4)\n return u, inverse_indices, counts\n\n\ndef _avg_pool(name, tuple_fn):\n @parse_args(\"v\", \"is\", \"is\", \"is\", \"i\", \"i\", \"none\")\n def symbolic_fn(g, input, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override=None):\n padding = sym_help._avgpool_helper(tuple_fn, padding, kernel_size, stride, divisor_override, name)\n if not stride:\n stride = kernel_size\n if count_include_pad:\n input = g.op(\"Pad\", input,\n g.op(\"Constant\", value_t=torch.tensor(((0,) * 2 + padding) * 2)), mode_s=\"constant\")\n padding = (0,) * len(padding)\n output = g.op(\"AveragePool\", input,\n kernel_shape_i=tuple_fn(kernel_size),\n strides_i=tuple_fn(stride),\n pads_i=padding * 2,\n ceil_mode_i=ceil_mode)\n return output\n return symbolic_fn\n\n\navg_pool1d = _avg_pool(\"avg_pool1d\", _single)\navg_pool2d = _avg_pool(\"avg_pool2d\", _pair)\navg_pool3d = _avg_pool(\"avg_pool3d\", _triple)\n\n\n@parse_args(\"v\", \"i\", \"i\", \"i\", \"i\")\ndef unique_dim(g, self, dim, sorted, return_inverse, return_counts):\n u, indices, inverse_indices, counts = g.op(\"Unique\", self, axis_i=dim, sorted_i=sorted, outputs=4)\n return u, inverse_indices, counts\n\n\n@parse_args(\"v\", \"v\", \"i\", \"i\", \"i\", \"none\")\ndef topk(g, self, k, dim, largest, sorted, out=None):\n return sym_help._topk_helper(g, self, k, dim, largest=largest, sorted=sorted, out=out)\n\n\n@parse_args(\"v\", \"i\", \"i\", \"none\")\ndef sort(g, self, dim, decending, out=None):\n return sym_help._sort_helper(g, self, dim, decending=decending, out=out)\n\n\ndef round(g, self):\n return g.op(\"Round\", self)\n\n\ndef remainder(g, input, other):\n if sym_help._is_fp(input) or sym_help._is_fp(other):\n from torch.onnx.symbolic_opset9 import remainder as _remainder_9\n return _remainder_9(g, input, other)\n return g.op(\"Mod\", input, other, fmod_i=0)\n\n\n@parse_args(\"v\", \"v\", \"i\", \"i\")\ndef split(g, self, split_size_or_sizes, dim, _outputs=None):\n if not sym_help._is_split_static(split_size_or_sizes, _outputs):\n split_out = g.op(\"SplitToSequence\", self, split_size_or_sizes, axis_i=dim)\n if _outputs is None:\n return split_out\n # Convert to multiple slice nodes iff number of splits and number of outputs are statically known.\n if sym_help._is_packed_list(split_size_or_sizes) and len(sym_help._unpack_list(split_size_or_sizes)) == _outputs:\n split_sizes = [sym_help._unsqueeze_helper(g, v, [0]) for v in sym_help._unpack_list(split_size_or_sizes)]\n start = g.op(\"Constant\", value_t=torch.tensor([0], dtype=torch.long))\n axis = g.op(\"Constant\", value_t=torch.tensor([dim], dtype=torch.long))\n res = []\n for i in range(_outputs):\n end = g.op(\"Add\", start, split_sizes[i]) # split_sizes is a list of same length as _outputs\n res.append(g.op(\"Slice\", self, start, end, axis))\n start = end\n return res\n return [g.op(\"SequenceAt\", split_out, g.op(\"Constant\", value_t=torch.tensor([i], dtype=torch.long)))\n for i in range(_outputs)]\n else:\n return torch.onnx.symbolic_opset9.split(g, self, split_size_or_sizes, dim, _outputs)\n\n\n@parse_args(\"v\", \"v\", \"i\", \"i\")\ndef split_with_sizes(g, self, split_sizes, dim, _outputs=None):\n return split(g, self, split_sizes, dim, _outputs)\n\n\n@parse_args(\"v\", \"i\", 
\"i\")\ndef unbind(g, self, dim=0, _outputs=None):\n if _outputs is None:\n return g.op(\"SplitToSequence\", self, g.op(\"Constant\", value_t=torch.tensor(1, dtype=torch.long)), axis_i=dim, keepdims_i=0)\n else:\n return torch.onnx.symbolic_opset9.unbind(g, self, dim, _outputs)\n\n\n# Generate paddings in ONNX order based on pad in pytorch.\n# Args:\n# input: the input tensor.\n# pad: the paddings in pytorch.\n# The order is dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, ..., dim_m_begin, dim_m_end,\n# where m is in range [0, n].\ndef _prepare_onnx_paddings(g, input, pad):\n if not sym_help._is_packed_list(pad) and sym_help._is_list(pad) and sym_help._is_scalar_list(pad):\n pad = g.op(\"ConcatFromSequence\", pad, axis_i=0, new_axis_i=1)\n # The desired order of paddings is\n # dim_0_begin, dim_1_begin, ... , dim_0_end, ..., dim_n_end.\n # n is the dimension of input.\n # Assume zero-dimensions in the beginning, pad the \"pad\" sequence with zeros in the beginning\n pad_len = torch.onnx.symbolic_opset9.size(g, pad, g.op(\"Constant\", value_t=torch.tensor([0])))\n # Set extension = [0] * (dim * 2 - len(pad))\n rank = sym_help._get_tensor_rank(input)\n if rank is None:\n rank = g.op(\"Size\", g.op(\"Shape\", input))\n else:\n rank = g.op(\"Constant\", value_t=torch.tensor(rank, dtype=torch.int64))\n extension = g.op(\"Sub\", g.op(\"Mul\", rank,\n g.op(\"Constant\", value_t=torch.tensor(2, dtype=torch.int64))), pad_len)\n # Concat pad with extension: paddings = [dim_n_begin, dim_n_end, dim_n-1_begin, dim_n-1_end, 0, 0, ... ]\n # Currently ONNX only supports int64 type for Pad\n pad = g.op(\"Cast\", pad, to_i=sym_help.cast_pytorch_to_onnx[\"Long\"])\n paddings = g.op(\"Concat\", pad, g.op(\"ConstantOfShape\", extension, value_t=torch.tensor([0], dtype=torch.int64)), axis_i=0)\n # Reshape and reverse order and collate first beginnings and then ends\n # paddings = [[..., 0, dim_n-1_begin, dim_n_begin],\n # [..., 0, dim_n-1_end, dim_n_end]]\n # Reshape back to 1-D paddings = [..., 0, dim_n - 1_begin, dim_n_begin, ..., 0, dim_n - 1_end, dim_n_end]\n paddings = sym_help._reshape_helper(g, paddings, g.op(\"Constant\", value_t=torch.tensor([-1, 2])))\n paddings = g.op(\"Transpose\", torch.onnx.symbolic_opset10.flip(g, paddings, [0]), perm_i=[1, 0])\n paddings = sym_help._reshape_helper(g, paddings, g.op(\"Constant\", value_t=torch.tensor([-1])))\n padding_c = g.op(\"Cast\", paddings, to_i=sym_help.cast_pytorch_to_onnx[\"Long\"])\n return padding_c\n\n\ndef constant_pad_nd(g, input, padding, value=None):\n mode = \"constant\"\n value = sym_help._maybe_get_scalar(value)\n value = sym_help._if_scalar_type_as(g, value, input)\n pad = _prepare_onnx_paddings(g, input, padding)\n return g.op(\"Pad\", input, pad, value, mode_s=mode)\n\n\ndef reflection_pad(g, input, padding):\n mode = \"reflect\"\n paddings = _prepare_onnx_paddings(g, input, padding)\n return g.op(\"Pad\", input, paddings, mode_s=mode)\n\n\ndef replication_pad(g, input, padding):\n mode = \"edge\"\n paddings = _prepare_onnx_paddings(g, input, padding)\n return g.op(\"Pad\", input, paddings, mode_s=mode)\n\n\nreflection_pad1d = reflection_pad\nreflection_pad2d = reflection_pad\nreflection_pad3d = reflection_pad\nreplication_pad1d = replication_pad\nreplication_pad2d = replication_pad\nreplication_pad3d = replication_pad\n\n\ndef linalg_det(g, self):\n return g.op(\"Det\", self)\n\n\ndef logdet(g, input):\n from torch.onnx.symbolic_opset9 import log\n return log(g, linalg_det(g, input))\n\n\ndef arange(g, *args):\n def 
_get_arange_dtype(dtype):\n dtype = sym_help._maybe_get_const(dtype, \"i\")\n return dtype\n\n if len(args) == 2 or len(args) == 5:\n if len(args) == 2:\n # aten::arange(Scalar end, Tensor out)\n dtype = None\n else:\n # aten::arange(Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)\n dtype = _get_arange_dtype(args[1])\n type, end, start, step = sym_help._arange_cast_helper(g, end=args[0], dtype=dtype)\n start_default = g.op(\"Constant\", value_t=torch.tensor(0, dtype=sym_help.scalar_type_to_pytorch_type[type]))\n delta_default = g.op(\"Constant\", value_t=torch.tensor(1, dtype=sym_help.scalar_type_to_pytorch_type[type]))\n arange_tensor = g.op(\"Range\", start_default, end, delta_default)\n elif len(args) == 4 or len(args) == 7:\n if len(args) == 4:\n # aten::arange(Scalar start, Scalar end, Scalar step, Tensor out)\n dtype = None\n else:\n # aten::arange(Scalar start, Scalar end, Scalar step, ScalarType dtype, Layout, Device, bool pin_memory)\n dtype = _get_arange_dtype(args[3])\n type, end, start, step = sym_help._arange_cast_helper(g, start=args[0], end=args[1], step=args[2], dtype=dtype)\n arange_tensor = g.op(\"Range\", start, end, step)\n elif len(args) == 6:\n # aten::arange(Scalar start, Scalar end, ScalarType dtype, Layout, Device, bool pin_memory)\n dtype = _get_arange_dtype(args[2])\n type, end, start, step = sym_help._arange_cast_helper(g, start=args[0], end=args[1], dtype=dtype)\n delta_default = g.op(\"Constant\", value_t=torch.tensor(1, dtype=sym_help.scalar_type_to_pytorch_type[type]))\n arange_tensor = g.op(\"Range\", start, end, delta_default)\n else:\n raise NotImplementedError(\"Unknown aten::arange signature taking \" + str(len(args)) + \" arguments.\")\n return arange_tensor\n\n\n@parse_args(\"v\", \"i\")\ndef _dim_arange(g, like, dim):\n like_shape = g.op(\"Shape\", like)\n stop = g.op(\"Gather\", like_shape, g.op(\"Constant\", value_t=torch.tensor(dim)), axis_i=0)\n if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:\n return g.op(\"_caffe2::Range\", stop)\n return arange(g, stop, 4, None, None, None)\n\n\ndef size(g, self, dim=None):\n if dim is None:\n return g.op(\"Shape\", self)\n return sym_help._size_helper(g, self, dim)\n\n\ndef squeeze(g, self, dim=None):\n if dim is None:\n return g.op(\"Squeeze\", self)\n\n dim = sym_help._get_const(dim, \"i\", \"dim\")\n\n input_rank = sym_help._get_tensor_rank(self)\n adjusted_dim = dim\n if input_rank is not None and dim < 0:\n adjusted_dim += input_rank\n dim_size = sym_help._get_tensor_dim_size(self, adjusted_dim)\n if (dim < 0 and input_rank is None) or dim_size is None:\n # If onnx shape inference is not on, export always as dynamic.\n # Because we cannot tell if observed static shape is also static at runtime.\n # create \"cond\" node (condition is shape[i]==1)\n dim_constant = g.op(\"Constant\", value_t=torch.tensor([dim]))\n size = sym_help._size_helper(g, self, dim_constant)\n const_one = g.op(\"Constant\", value_t=torch.ones(1, dtype=torch.int64))\n cond = g.op(\"Equal\", size, const_one)\n # create the \"If\" node and add the \"then\" and \"else\" blocks to it.\n if_node_outputs = g.op(\"If\", cond)\n if_node = if_node_outputs.node()\n if_block = torch.onnx.utils._add_block(if_node)\n squeeze_ = sym_help._squeeze_helper(if_block, self, [dim])\n torch.onnx.utils._add_output_to_block(if_block, squeeze_)\n else_block = torch.onnx.utils._add_block(if_node)\n identity_ = else_block.op(\"Identity\", self)\n torch.onnx.utils._add_output_to_block(else_block, 
identity_)\n return if_node_outputs\n\n # For static input shape\n dim = adjusted_dim\n if dim_size > 1:\n warnings.warn(\"This model contains a squeeze operation on dimension \" + str(dim) + \". The size of \" +\n \"this dimension in the given input is \" + str(dim_size) + \". The model will \" +\n \"be exported without the squeeze node. If the model is intended to be used with dynamic \" +\n \"input shapes, please export with dynamic_axes argument.\")\n return self\n return sym_help._squeeze_helper(g, self, [dim])\n\n\n@parse_args(\"v\", \"i\")\ndef unsqueeze(g, self, dim):\n return sym_help._unsqueeze_helper(g, self, [dim])\n\ndef mm(g, self, other):\n return g.op(\"Gemm\", self, other, beta_f=0.0, alpha_f=1.0)\n\n\ndef index(g, self, index):\n if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:\n return g.op(\"ATen\", self, index, operator_s=\"index\")\n\n if sym_help._is_packed_list(index):\n indices = sym_help._unpack_list(index)\n else:\n indices = [index]\n\n # Handle single mask index.\n if len(indices) == 1:\n index = indices[0]\n if not sym_help._is_none(index) and (index.type().scalarType() == \"Bool\" or index.type().scalarType() == \"Byte\"):\n from torch.onnx.symbolic_opset9 import nonzero\n index = nonzero(g, index)\n return g.op(\"GatherND\", self, index)\n from torch.onnx.symbolic_opset9 import index as index_opset9\n return index_opset9(g, self, index)\n\n\ndef index_fill(g, self, dim, index, value):\n dim_value = sym_help._parse_arg(dim, \"i\")\n if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:\n return g.op(\"ATen\", self, index, value, dim_i=dim_value, operator_s=\"index_fill\")\n expanded_index_shape, expanded_index = sym_help._index_fill_reshape_helper(g, self, dim, index)\n value = sym_help._maybe_get_scalar(value)\n value = sym_help._if_scalar_type_as(g, value, self)\n expanded_value = expand(g, value, expanded_index_shape, None)\n return scatter(g, self, dim, expanded_index, expanded_value)\n\n\ndef index_copy(g, self, dim, index, source):\n dim_value = sym_help._parse_arg(dim, \"i\")\n if sym_help._operator_export_type == torch.onnx.OperatorExportTypes.ONNX_ATEN_FALLBACK:\n return g.op(\"ATen\", self, index, source, dim_i=dim_value, operator_s=\"index_copy\")\n expanded_index_shape, expanded_index = sym_help._index_fill_reshape_helper(g, self, dim, index)\n return scatter(g, self, dim, expanded_index, source)\n\n\ndef __rshift_(g, self, other):\n # make sure to cast other to self's type\n # (when self is long, make sure that other is not float)\n if other.type().scalarType() != self.type().scalarType():\n other = g.op(\"Cast\", other, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])\n\n if self.type().scalarType() == \"Byte\":\n return g.op(\"BitShift\", self, other, direction_s=\"RIGHT\")\n\n two = g.op(\"Constant\", value_t=torch.tensor(2, dtype=torch.float32))\n # exponent (same type as self) has to be float or double in onnx::Pow\n if not sym_help._is_fp(self):\n other = g.op(\"Cast\", other, to_i=sym_help.cast_pytorch_to_onnx[\"Float\"])\n two_pow = g.op(\"Pow\", two, other)\n two_pow = g.op(\"Cast\", two_pow, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])\n rshift = g.op(\"Div\", self, two_pow)\n return rshift\n\n\ndef __lshift_(g, self, other):\n # make sure to cast other to self's type\n # (when self is long, make sure that other is not float)\n if other.type().scalarType() != self.type().scalarType():\n other = g.op(\"Cast\", other, 
to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])\n\n if self.type().scalarType() == \"Byte\":\n return g.op(\"BitShift\", self, other, direction_s=\"LEFT\")\n\n two = g.op(\"Constant\", value_t=torch.tensor(2, dtype=torch.float32))\n # exponent (same type as self) has to be float or double in onnx::Pow\n if not sym_help._is_fp(self):\n other = g.op(\"Cast\", other, to_i=sym_help.cast_pytorch_to_onnx[\"Float\"])\n two_pow = g.op(\"Pow\", two, other)\n two_pow = g.op(\"Cast\", two_pow, to_i=sym_help.cast_pytorch_to_onnx[self.type().scalarType()])\n lshift = g.op(\"Mul\", self, two_pow)\n return lshift\n\n\ndef _get_im2col_indices_along_dim(g, input_d, kernel_size_d, dilation_d, padding_d, stride_d):\n # Input is always 4-D (N, C, H, W)\n # Calculate indices of sliding blocks along spatial dimension\n # Slide kernel over input each dim d:\n # each dimension d ranges from 0 to input[d]+2xpadding[d]-dilation[d]x(kernel_size[d]-1)\n # with steps = stride\n\n blocks_d = g.op(\"Add\", input_d, g.op(\"Constant\", value_t=torch.tensor(padding_d * 2)))\n blocks_d = g.op(\"Sub\", blocks_d, g.op(\"Constant\", value_t=torch.tensor(dilation_d * (kernel_size_d - 1))))\n\n # Stride kernel over input and find starting indices along dim d\n blocks_d_indices = g.op(\"Range\", g.op(\"Constant\", value_t=torch.tensor(0)),\n blocks_d, g.op(\"Constant\", value_t=torch.tensor(stride_d)))\n\n # Apply dilation on kernel and find its indices along dim d\n kernel_grid = torch.arange(0, kernel_size_d * dilation_d, dilation_d)\n kernel_grid = g.op(\"Constant\", value_t=kernel_grid.unsqueeze(0))\n\n # Broadcast and add kernel staring positions (indices) with\n # kernel_grid along dim d, to get block indices along dim d\n blocks_d_indices = sym_help._unsqueeze_helper(g, blocks_d_indices, [0]) # Reshape to [1, -1]\n kernel_mask = sym_help._reshape_helper(g, kernel_grid, g.op(\"Constant\", value_t=torch.tensor([-1, 1])))\n block_mask = g.op(\"Add\", blocks_d_indices, kernel_mask)\n\n return block_mask\n\n\ndef _get_im2col_padded_input(g, input, padding_h, padding_w):\n # Input is always 4-D tensor (N, C, H, W)\n # Padding tensor has the following format: (padding_h, padding_w)\n # Reshape the padding to follow ONNX format: (dim1_begin, dim2_begin,...,dim1_end, dim2_end,...)\n pad = g.op(\"Constant\", value_t=torch.LongTensor([0, 0, padding_h, padding_w] * 2))\n return g.op(\"Pad\", input, pad)\n\n\ndef _get_im2col_output_shape(g, input, kernel_h, kernel_w):\n batch_dim = size(g, input, g.op(\"Constant\", value_t=torch.tensor(0)))\n channel_dim = size(g, input, g.op(\"Constant\", value_t=torch.tensor(1)))\n channel_unfolded = g.op(\"Mul\", channel_dim,\n g.op(\"Constant\", value_t=torch.tensor(kernel_h * kernel_w)))\n\n return g.op(\"Concat\",\n sym_help._unsqueeze_helper(g, batch_dim, [0]),\n sym_help._unsqueeze_helper(g, channel_unfolded, [0]),\n g.op(\"Constant\", value_t=torch.tensor([-1])), axis_i=0)\n\n\n@parse_args(\"v\", \"is\", \"is\", \"is\", \"is\")\ndef im2col(g, input, kernel_size, dilation, padding, stride):\n # Input is always 4-D tensor (N, C, H, W)\n # All other args are int[2]\n\n input_h = size(g, input, g.op(\"Constant\", value_t=torch.tensor(2)))\n input_w = size(g, input, g.op(\"Constant\", value_t=torch.tensor(3)))\n\n stride_h, stride_w = stride[0], stride[1]\n padding_h, padding_w = padding[0], padding[1]\n dilation_h, dilation_w = dilation[0], dilation[1]\n kernel_h, kernel_w = kernel_size[0], kernel_size[1]\n\n blocks_row_indices = _get_im2col_indices_along_dim(g, input_h, kernel_h, 
dilation_h, padding_h, stride_h)\n blocks_col_indices = _get_im2col_indices_along_dim(g, input_w, kernel_w, dilation_w, padding_w, stride_w)\n\n output_shape = _get_im2col_output_shape(g, input, kernel_h, kernel_w)\n padded_input = _get_im2col_padded_input(g, input, padding_h, padding_w)\n\n # For a 4D matrix of size (1, 1, 3, 3) as below with kernel_size=2, stride=1, and dilation=1\n # [[[[1., 2., 3.,],\n # [4., 5., 6.,],\n # [7., 8., 9.,]]]]\n # First gather indices along rows (dim=2) with blocks_row_indices = [[0,1], [1,2]] to get:\n # [[[[[1., 2., 3.],\n # [4., 5., 6.]],\n # [[4., 5., 6.],\n # [7., 8., 9.]]]]]\n # And then gather along cols (dim=4) with blocks_row_indices = [[0,1], [1,2]] to get:\n # [[[[[[1., 2.],\n # [4., 5.]],\n # [[2., 3.],\n # [5., 6]]],\n # [[[4., 5.],\n # [7., 8.]],\n # [[5., 6.],\n # [8., 9.]]]]]]\n # Transpose dims 3 (depth) and 4 (rows), and then reshape to output shape (1, 1, 4, 4) to get:\n # [[[1., 2., 4., 5.],\n # [2., 3., 5., 6.],\n # [4., 5., 7., 8.],\n # [5., 6., 8., 9.]]]\n output = g.op(\"Gather\", padded_input, blocks_row_indices, axis_i=2)\n output = g.op(\"Gather\", output, blocks_col_indices, axis_i=4)\n output = g.op(\"Transpose\", output, perm_i=[0, 1, 2, 4, 3, 5])\n return sym_help._reshape_helper(g, output, output_shape)\n\n\ndef narrow(g, input, dim, start, length):\n from torch.onnx.symbolic_helper import _slice_helper\n end = g.op(\"Add\", start, length)\n return _slice_helper(g, input, axes=dim, starts=start, ends=end, dynamic_slice=True)\n\n\n@parse_args(\"v\", \"i\", \"i\")\ndef flatten(g, input, start_dim, end_dim):\n dim = sym_help._get_tensor_rank(input)\n # use ONNX's Flatten operator for cases where the output shape is 2D\n if start_dim == 1:\n if (end_dim == -1 or (dim is not None and end_dim == dim - 1)):\n return g.op(\"Flatten\", input, axis_i=start_dim)\n elif start_dim == 0:\n if (end_dim == -2 or (dim is not None and end_dim == dim - 2)):\n return g.op(\"Flatten\", input, axis_i=end_dim + 1)\n if dim is None:\n return _unimplemented(\"dim\",\n \"ONNX and PyTorch use different strategies to split the input. \"\n \"Input rank must be known at export time.\")\n # if end_dim is negative add dim\n if end_dim < 0 :\n end_dim = dim + end_dim\n\n return sym_help._flatten_helper(g, input, start_dim, end_dim, dim)\n\n\n@parse_args(\"v\", \"v\", \"v\", \"i\", \"i\", \"i\", \"v\", \"i\", \"i\")\ndef embedding_bag(g,\n embedding_matrix,\n indices,\n offsets,\n scale_grad_by_freq,\n mode,\n sparse,\n per_sample_weights,\n include_last_offset,\n padding_idx):\n if scale_grad_by_freq and sym_help._training_mode:\n return sym_help._onnx_unsupported(\"embedding_bag with scale_grad_by_freq for training mode\")\n if padding_idx is not None and padding_idx >= 0:\n raise RuntimeError(\"embedding_bag with padding_idx\")\n\n loop_condition = g.op(\"Constant\", value_t=torch.tensor(1))\n loop_condition = g.op(\"Cast\", loop_condition, to_i=9)\n zero = g.op(\"Constant\", value_t=torch.tensor([0]))\n\n indices_len = sym_help._unsqueeze_helper(g,\n sym_help._size_helper(g, indices, g.op(\"Constant\", value_t=torch.tensor(0))),\n [0])\n if not include_last_offset:\n offsets = [offsets, indices_len]\n offsets = g.op(\"Concat\", *offsets, axis_i=0)\n\n # Offsets holds the starting index position of each bag. So we create a list of the indices slices (determined by\n # offsets) and gather those indices in indices_row. 
Then we use this subset of indices to gather from embeddings.\n # The embeddings output is a loop scan output, so we can avoid creating a sequence and inserting elements in.\n offsets_starts = sym_help._slice_helper(g, offsets, axes=[0], starts=[0], ends=[maxsize], steps=[1])\n offsets_ends = sym_help._slice_helper(g, offsets, axes=[0], starts=[1], ends=[maxsize], steps=[1])\n\n loop_len = sym_help._size_helper(g, offsets_ends, g.op(\"Constant\", value_t=torch.tensor(0)))\n loop = g.op(\"Loop\", loop_len, loop_condition)\n\n loop_block = _add_block(loop.node())\n block_input_iter = _add_input_to_block(loop_block)\n cond = _add_input_to_block(loop_block)\n\n indices_start = loop_block.op(\"Gather\", offsets_starts, block_input_iter, axis_i=0)\n indices_end = loop_block.op(\"Gather\", offsets_ends, block_input_iter, axis_i=0)\n indices_start = sym_help._unsqueeze_helper(loop_block, indices_start, [0])\n indices_end = sym_help._unsqueeze_helper(loop_block, indices_end, [0])\n\n indices_row = loop_block.op(\"Slice\", indices, indices_start, indices_end, zero)\n embeddings = loop_block.op(\"Gather\", embedding_matrix, indices_row, axis_i=0)\n if not sym_help._is_none(per_sample_weights):\n per_sample_weights_row = loop_block.op(\"Slice\", per_sample_weights,\n indices_start,\n indices_end,\n zero)\n per_sample_weights_row = sym_help._unsqueeze_helper(loop_block, per_sample_weights_row, [1])\n embeddings = loop_block.op(\"Mul\", embeddings, per_sample_weights_row)\n if mode == 0:\n embeddings = sym_help._reducesum_helper(loop_block, embeddings, axes_i=[0], keepdims_i=0)\n elif mode == 1:\n embeddings = loop_block.op(\"ReduceMean\", embeddings, axes_i=[0], keepdims_i=0)\n else:\n embeddings = loop_block.op(\"ReduceMax\", embeddings, axes_i=[0], keepdims_i=0)\n\n cond_out = loop_block.op(\"Cast\", loop_condition, to_i=9)\n _add_output_to_block(loop_block, cond_out)\n _add_output_to_block(loop_block, embeddings)\n\n # aten::embedding_bag returns a tuple of 4 elements: output, offset2bag, bag_size, max_indices.\n # But the last three outputs are not used in torch.nn.EmbeddingBag or torch.nn.functional.embedding_bag.\n return loop.node().output(), None, None, None\n\n\ndef prim_ConstantChunk(g, self, chunks, dim):\n input_shape = g.op(\"Shape\", self)\n axis = g.op(\"Constant\", value_t=torch.tensor([dim], dtype=torch.long))\n input_shape_dim = g.op(\"Gather\", input_shape, axis, axis_i=0)\n start = g.op(\"Constant\", value_t=torch.tensor([0], dtype=torch.long))\n chunk_size = g.op(\"Constant\", value_t=torch.tensor([chunks], dtype=torch.long))\n chunk_size_minus_1 = g.op(\"Constant\", value_t=torch.tensor([chunks - 1], dtype=torch.long))\n input_shape_dim_shift = g.op(\"Add\", input_shape_dim, chunk_size_minus_1)\n chunk_dim = g.op(\"Div\", input_shape_dim_shift, chunk_size)\n res = []\n for i in range(chunks):\n index = g.op(\"Constant\", value_t=torch.tensor([i + 1], dtype=torch.long))\n end = g.op(\"Mul\", chunk_dim, index)\n res.append(g.op(\"Slice\", self, start, end, axis))\n start = end\n return res\n\ndef chunk(g, self, chunks, dim):\n # Calculate chunk size for dynamic chunk\n dim_size = g.op(\"Gather\", g.op(\"Shape\", self), dim, axis_i=0)\n chunk_size_s = g.op(\"Sub\", chunks, g.op(\"Constant\", value_t=torch.tensor([1], dtype=torch.long)))\n chunk_size = g.op(\"Div\", g.op(\"Add\", dim_size, chunk_size_s), chunks)\n # Create splits vector\n chunk_vec = [expand(g, chunk_size, chunk_size_s, None),\n g.op(\"Sub\", dim_size, g.op(\"Mul\", chunk_size, chunk_size_s))]\n chunk_vec = 
g.op(\"Concat\", *chunk_vec, axis_i=0)\n return split(g, self, chunk_vec, dim)\n\n\ndef normal(g, loc, scale, seed):\n # If you can sample from a given distribution with mean 0 and variance 1, then you can easily sample from a\n # scale-location transformation of that distribution, which has mean μ and variance σ's square. If x is a sample\n # from a mean 0 and variance 1 distribution then\n # σx+μ\n # is a sample with mean μ and variance σ's square.\n result = mul(g, scale, g.op(\"RandomNormalLike\", loc))\n return add(g, result, loc)\n",
"#!/usr/bin/env python3\n# Owner(s): [\"oncall: distributed\"]\n\nimport sys\n\nimport torch\nimport torch.distributed as dist\n\nif not dist.is_available():\n print(\"Distributed not available, skipping tests\", file=sys.stderr)\n sys.exit(0)\n\nfrom torch.testing._internal.common_utils import IS_IN_CI, run_tests\nfrom torch.testing._internal.distributed.rpc.faulty_rpc_agent_test_fixture import (\n FaultyRpcAgentTestFixture,\n)\nfrom torch.testing._internal.distributed.rpc_utils import (\n FAULTY_AGENT_TESTS,\n generate_tests,\n)\n\n\n# On CircleCI these tests are already run on CPU jobs, thus to save resources do\n# not run them on GPU jobs, since thet wouldn't provide additional test signal.\nif not (IS_IN_CI and torch.cuda.is_available()):\n globals().update(\n generate_tests(\n \"Faulty\",\n FaultyRpcAgentTestFixture,\n FAULTY_AGENT_TESTS,\n __name__,\n )\n )\n\n\nif __name__ == \"__main__\":\n run_tests()\n",
"from dataclasses import dataclass\nfrom typing import List, cast\n\nimport torch\nfrom torch.distributed._sharding_spec import ShardMetadata\nfrom torch.distributed.remote_device import _remote_device\n\n\n@dataclass\nclass Shard(object):\n \"\"\"\n Container which holds the data for a shard as a Tensor and also\n the associated metadata for that shard.\n\n Args:\n tensor(torch.Tensor): Local tensor for the shard.\n metadata(:class `torch.distributed._sharded_tensor.ShardMetadata`):\n The metadata for the shard, including offsets, lengths and device placement.\n \"\"\"\n __slots__ = ['tensor', 'metadata']\n tensor: torch.Tensor\n metadata: ShardMetadata\n\n def __post_init__(self):\n # verification between local tensor and metadata\n if list(self.tensor.size()) != self.metadata.shard_sizes:\n raise ValueError(\n \"Shard tensor size does not match with metadata.shard_lengths! \"\n f\"Found shard tensor size: {list(self.tensor.size())}, \"\n f\"metadata.shard_lengths: {self.metadata.shard_sizes}, \"\n )\n placement_device = cast(_remote_device, self.metadata.placement).device()\n if placement_device != self.tensor.device:\n raise ValueError(\n f\"Local shard tensor device does not match with local Shard's placement! \"\n f\"Found local shard tensor device: {self.tensor.device}, \"\n f\"local shard metadata placement device: {placement_device}\"\n )\n\n @classmethod\n def from_tensor_and_offsets(cls, tensor: torch.Tensor, shard_offsets: List[int], rank: int):\n \"\"\"\n Creates a Shard of a ShardedTensor from a local torch.Tensor, shard_offsets and rank.\n\n Args:\n tensor(torch.Tensor): Local tensor for the shard.\n shard_offsets(List[int]): List of integers specify the offset\n of the shard on each dimension.\n rank(int): Specify the rank for the shard.\n \"\"\"\n shard_sizes = list(tensor.size())\n placement = _remote_device(f\"rank:{rank}/{str(tensor.device)}\")\n shard_meta = ShardMetadata(\n shard_offsets=shard_offsets,\n shard_sizes=shard_sizes,\n placement=placement\n )\n return Shard(tensor, shard_meta)\n",
"import logging\nfrom typing import Tuple, Any, List, Dict\n\nimport torch\nfrom torch.fx.node import map_aggregate\n\nfrom .quantization_state import (\n AutoQuantizationState,\n)\nfrom .utils import (\n trace_with_inputs,\n is_leaf,\n HookType,\n get_torch_function_hook_type,\n get_module_hook_type,\n)\nfrom .model_utils import (\n pack_weights_for_functionals,\n attach_scale_zp_values_to_model,\n attach_op_convert_info_to_model,\n)\nfrom . import auto_trace_rewriter\n\nlogger = logging.getLogger('auto_trace')\nlogging.basicConfig(level=logging.DEBUG)\n# logging.basicConfig(level=logging.INFO)\n\n# enabling this tanks performance, make sure to disable for benchmarking\n# TODO(future PR): clean this up\nenable_logging = False\n# enable_logging = True\n\n\ndef add_auto_observation(\n model : torch.nn.Module,\n example_inputs: Tuple[Any],\n input_dtypes: Any = (torch.float,), # must be same structure as model inputs\n output_dtypes: Any = (torch.float,), # must be same structure as model outputs\n) -> torch.nn.Module:\n def convert_to_interception_proxy(x):\n if isinstance(x, torch.Tensor):\n return x.as_subclass(QuantizationPrepareTensorProxy) # type: ignore[arg-type]\n else:\n return x\n\n cur_module = None\n first_call = True\n module_stack : List[torch.nn.Module] = []\n # Counter for tensor IDs, will be modified inplace by quant state.\n # This is used to track tensors from output ops to input ops. For example,\n # if op_n had a tensor output with id=1, and op_n+2 had a tensor input with\n # id=1, we know that the output of op_n is the input to op_n+2. Note,\n # this is a list because it needs to incremented inplace.\n qtensor_id = [0]\n module_id_to_fqn: Dict[int, str] = {}\n\n # Counter for global quantizeable ops, useful for intermediate activation\n # logging.\n global_op_idx = [0]\n\n class QuantizationPrepareTensorProxy(torch.Tensor):\n \"\"\"\n An override of `torch.Tensor` to enable dynamic tracing for\n quantization.\n\n For each function with a `__torch_function__` override, this proxy does\n the following for functions which need quantization:\n\n 1. calls `_auto_quant_state.validate_cur_op` to validate that\n the currently seen op is the same as what was recorded during tracing\n 2. calls `_auto_quant_state.op_prepare_before_hook`\n 3. executes the original function\n 4. calls `_auto_quant_state.op_prepare_after_hook`\n 5. 
calls `_auto_quant_state.mark_cur_op_complete` to increment\n the current op index in preparation for the next op\n\n Otherwise, calls the original function.\n \"\"\"\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n # to prevent printing things from going into an infinite loop\n if func == torch.Tensor.__repr__:\n return super().__torch_function__(func, types, args, kwargs)\n if enable_logging:\n logger.debug(f'__torch_function__ {str(func)} len_args {len(args)}')\n\n nonlocal qtensor_id\n nonlocal cur_module\n kwargs = kwargs if kwargs else {}\n # if we are in a function, the current module is always a parent\n parent_module = cur_module\n hook_type = get_torch_function_hook_type(parent_module, func)\n\n if hook_type is HookType.OP_HOOKS:\n qstate = parent_module._auto_quant_state # type: ignore[attr-defined]\n fqn = module_id_to_fqn[id(parent_module)] if parent_module else None\n if not first_call:\n qstate.validate_cur_op(func)\n # run \"before\" hook\n args, kwargs = qstate.op_prepare_before_hook(\n func, args, kwargs, first_call, qtensor_id, fqn, parent_module)\n # forward\n output = super().__torch_function__(func, types, args, kwargs)\n # run \"after\" hook\n output = qstate.op_prepare_after_hook(\n func, output, args, first_call, qtensor_id, parent_module,\n global_op_idx)\n qstate.mark_cur_op_complete(func)\n else:\n output = super().__torch_function__(func, types, args, kwargs)\n\n # TODO: is this right? Don't really understand this\n if output is NotImplemented:\n with torch._C.DisableTorchFunction():\n output = func(*args, **kwargs).as_subclass(\n QuantizationPrepareTensorProxy)\n assert output is not NotImplemented\n\n return output\n\n def __repr__(self):\n return f'QuantizationPrepareTensorProxy({super().__repr__()})'\n\n # TODO(future PR): add other math overrides\n\n class QuantizationInterceptionModule(type(model)): # type: ignore[misc]\n \"\"\"\n An override of user defined subclass of `nn.Module` to enable\n dynamic tracing for quantization.\n\n `cur_module` keeps track of the current module in the stack.\n\n During the first call, an `AutoQuantizationState` object is created and\n attached to each non-leaf module which we need to check for\n quantizeable operations.\n\n We override the `__call__` function to do the following for each\n module:\n\n If the module is an op which needs quantization:\n\n 1. calls `_auto_quant_state.validate_cur_op` to validate that\n the currently seen op is the same as what was recorded during tracing\n 2. calls parent module's `._auto_quant_state.op_prepare_before_hook`\n 3. executes the original module forward\n 4. calls parent module's `_auto_quant_state.op_prepare_after_hook`\n 5. calls `_auto_quant_state.mark_cur_op_complete` to increment\n the current op index in preparation for the next op\n\n If the module can contain children ops that need quantization:\n\n 1. calls `_auto_quant_state.inputs_prepare_hook` (not implemented yet)\n 2. executes the original module forward\n 3.
calls `_auto_quant_state.outputs_prepare_hook`\n\n Otherwise, calls the original module forward.\n \"\"\"\n\n def __call__(self, *args, **kwargs):\n new_args = map_aggregate(args, convert_to_interception_proxy)\n new_kwargs = map_aggregate(kwargs, convert_to_interception_proxy)\n orig_module_call = torch.nn.Module.__call__\n orig_nn_sequential_forward = torch.nn.Sequential.forward\n\n def _patched_module_call(self, *args, **kwargs):\n\n if enable_logging:\n logger.debug(f\"_patched_module_call: {type(self)}\")\n\n nonlocal cur_module\n old_module = cur_module\n cur_module = self\n try:\n parent_module = module_stack[-1] if len(module_stack) else None\n module_stack.append(self)\n fqn = module_id_to_fqn.get(id(self), None)\n\n if enable_logging:\n fqn = module_id_to_fqn.get(id(self), None)\n logger.debug(f\"\\nstarting fqn {fqn}\")\n\n hook_type = get_module_hook_type(parent_module, cur_module)\n\n if hook_type is HookType.OP_HOOKS:\n parent_qstate: AutoQuantizationState = \\\n parent_module._auto_quant_state # type: ignore[union-attr, assignment]\n # before hooks\n if not first_call:\n parent_qstate.validate_cur_op(cur_module)\n args, kwargs = parent_qstate.op_prepare_before_hook(\n cur_module, args, kwargs, first_call, qtensor_id,\n fqn, cur_module)\n\n # original forward\n output = orig_module_call(self, *args, **kwargs)\n\n # after hooks\n # TODO is it correct to call_cur_module twice here?\n output = parent_qstate.op_prepare_after_hook(\n cur_module, output, args, first_call, qtensor_id,\n cur_module, global_op_idx)\n parent_qstate.mark_cur_op_complete(cur_module)\n\n elif hook_type is HookType.MODULE_IO_HOOKS:\n # TODO(future PR): add inputs io hook\n\n cur_qstate = cur_module._auto_quant_state\n cur_qstate.reset_to_new_call()\n\n # original forward\n output = orig_module_call(self, *args, **kwargs)\n\n # after hooks\n output = cur_qstate.outputs_prepare_hook(\n output, first_call, qtensor_id)\n cur_qstate.validate_is_at_last_seen_idx()\n\n elif hook_type is HookType.ARG_DEQUANTS:\n output = orig_module_call(self, *args, **kwargs)\n # if this fp32 was inplace, make sure to set the output dtype\n # back to torch.float\n if hasattr(output, '_qtensor_info'):\n del output._qtensor_info\n\n else:\n output = orig_module_call(self, *args, **kwargs)\n\n if enable_logging:\n fqn = module_id_to_fqn.get(id(self), None)\n logger.debug(f\"\\nending fqn {fqn}\")\n\n return output\n finally:\n module_stack.pop()\n cur_module = old_module\n\n torch.nn.Module.__call__ = _patched_module_call\n torch.nn.Sequential.forward = _nn_sequential_patched_forward # type: ignore[assignment]\n nonlocal first_call\n try:\n if first_call:\n # Create a list before iterating because we are adding new\n # named modules inside the loop.\n named_modules = list(self.named_modules())\n for k, v in named_modules:\n\n # k is the global FQN, i.e. 
'foo.bar.baz'\n # v is the module instance\n #\n # we need to associate the global FQN with SeenOp\n # for modules, this is the module FQN\n # for functions, this is the parent module FQN\n module_id_to_fqn[id(v)] = k\n\n has_qconfig = hasattr(v, 'qconfig') and v.qconfig is not None\n if has_qconfig and not is_leaf(v):\n if v is self:\n # for the top level module only, specify input\n # and output dtypes\n v._auto_quant_state = AutoQuantizationState(\n v.qconfig, input_dtypes, output_dtypes)\n pass\n else:\n v._auto_quant_state = AutoQuantizationState(\n v.qconfig)\n\n global_op_idx[0] = 0\n\n output = super().__call__(*new_args, **new_kwargs)\n return output\n finally:\n torch.nn.Module.__call__ = orig_module_call\n torch.nn.Sequential.forward = orig_nn_sequential_forward # type: ignore[assignment]\n first_call = False\n\n\n model.__class__ = QuantizationInterceptionModule\n # create the graph\n trace_with_inputs(model, example_inputs)\n return model\n\n\n# TODO(future PR): add serialization support\ndef add_auto_convert(module : torch.nn.Module) -> torch.nn.Module:\n def convert_to_dispatch_proxy(x):\n if isinstance(x, torch.Tensor):\n return x.as_subclass(QuantizationConvertTensorProxy) # type: ignore[arg-type]\n else:\n return x\n\n module_id_to_fqn: Dict[int, str] = {}\n # Counter for global quantizeable ops, useful for intermediate activation\n # logging.\n global_op_idx = [0]\n\n class QuantizationConvertTensorProxy(torch.Tensor):\n \"\"\"\n An override of `torch.Tensor` to enable dynamic dispatch for\n quantization inference.\n\n For each function with a `__torch_fuction__` override, this proxy does\n the following for functions which need quantization:\n\n 1. calls `_auto_quant_state.validate_cur_op` to validate that\n the currently seen op is the same as what was recorded during tracing\n 2. calls `_auto_quant_state.op_convert_before_hook`.\n 3. executes the function, with target, args and kwargs possibly modified\n by (2)\n 4. calls `_auto_quant_state.inference_function_after_hook`.\n 5. 
calls `_auto_quant_state.mark_cur_op_complete` to increment\n the current op index in preparation for the next op\n\n Otherwise, calls the original function.\n \"\"\"\n\n @classmethod\n def __torch_function__(cls, func, types, args=(), kwargs=None):\n # to prevent printing things from going into an infinite loop\n if func == torch.Tensor.__repr__:\n return super().__torch_function__(func, types, args, kwargs)\n\n kwargs = kwargs if kwargs else {}\n # if we are in a function, the current module is always a parent\n parent_module = cur_module\n hook_type = get_torch_function_hook_type(parent_module, func)\n\n if enable_logging:\n with torch._C.DisableTorchFunction():\n logger.debug(\n f\"__torch_function__ {func} \" +\n f\"hook_type {hook_type} \" +\n # f\"arg_types {[type(arg) for arg in args]}) \" +\n f\"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]}\")\n\n if hook_type is HookType.OP_HOOKS:\n qstate: AutoQuantizationState = parent_module._auto_quant_state # type: ignore[union-attr]\n # before hooks\n qstate.validate_cur_op(func)\n func, args, kwargs = qstate.op_convert_before_hook(\n func, args, kwargs, parent_module) # type: ignore[arg-type]\n\n # forward\n output = super().__torch_function__(func, types, args, kwargs)\n # after hooks\n output = qstate.op_convert_after_hook(\n func, output, global_op_idx)\n qstate.mark_cur_op_complete(func)\n\n elif hook_type is HookType.ARG_DEQUANTS:\n # disabling torch function to prevent infinite recursion on\n # getset\n # TODO(future PR): handle more dtypes\n with torch._C.DisableTorchFunction():\n new_args = []\n for arg in args:\n if isinstance(arg, torch.Tensor) and arg.is_quantized:\n new_args.append(arg.dequantize())\n else:\n new_args.append(arg)\n args = tuple(new_args)\n output = super().__torch_function__(func, types, args, kwargs)\n\n else: # HookType.NONE\n output = super().__torch_function__(func, types, args, kwargs)\n\n # TODO: is this right? Don't really understand this\n if output is NotImplemented:\n with torch._C.DisableTorchFunction():\n output = func(*args, **kwargs).as_subclass(\n QuantizationConvertTensorProxy)\n assert output is not NotImplemented\n\n if enable_logging:\n out_dtype = None\n if isinstance(output, torch.Tensor):\n out_dtype = output.dtype\n logger.debug(f\"__torch_function__ {func} out {out_dtype} end\")\n\n return output\n\n def __repr__(self):\n return f'QuantizationConvertTensorProxy({super().__repr__()})'\n\n cur_module = None\n module_stack : List[torch.nn.Module] = []\n\n assert len(module.__class__.__bases__) == 1\n\n class QuantizationDispatchModule(module.__class__.__bases__[0]): # type: ignore[name-defined]\n \"\"\"\n An override of user defined subclass of `nn.Module` to enable\n dynamic tracing for quantization, after model conversion\n to quantized domain.\n\n `cur_module` keeps track of the current module in the stack.\n\n Tensor arguments are converted to `QuantizationConvertTensorProxy`.\n\n We override the `__call__` function to do the following for each\n module:\n\n If the module is an op which needs quantization:\n\n 1. calls `_auto_quant_state.validate_cur_op` to validate that\n the currently seen op is the same as what was recorded during tracing\n 2. calls parent module's `._auto_quant_state.op_convert_before_hook`\n 3. executes the original module forward\n 4. calls parent module's `_auto_quant_state.op_convert_after_hook`\n 5. 
calls `_auto_quant_state.mark_cur_op_complete` to increment\n the current op index in preparation for the next op\n\n If the module can contain children ops that need quantization:\n\n 1. calls `_auto_quant_state.inputs_convert_hook` (not implemented yet)\n 2. executes the original module forward\n 3. calls `_auto_quant_state.outputs_convert_hook`\n\n Otherwise, calls the original module forward.\n \"\"\"\n\n def __call__(self, *args, **kwargs):\n new_args = map_aggregate(args, convert_to_dispatch_proxy)\n new_kwargs = map_aggregate(kwargs, convert_to_dispatch_proxy)\n orig_module_call = torch.nn.Module.__call__\n orig_nn_sequential_forward = torch.nn.Sequential.forward\n\n def _patched_module_call(self, *args, **kwargs):\n if enable_logging:\n fqn = module_id_to_fqn.get(id(self), None)\n logger.debug(f\"\\nstarting fqn {fqn}\")\n\n nonlocal cur_module\n old_module = cur_module\n cur_module = self\n try:\n parent_module = module_stack[-1] if len(module_stack) else None\n module_stack.append(self)\n hook_type = get_module_hook_type(parent_module, cur_module)\n if enable_logging:\n logger.debug(\n f\"_patched_module_call {type(self)} \" +\n # f\"arg_types {[type(arg) for arg in args]} \" +\n f\"arg_dtypes {[arg.dtype if isinstance(arg, torch.Tensor) else None for arg in args]} \" +\n f\"hook_type {hook_type}\")\n\n if hook_type is HookType.OP_HOOKS:\n # before hooks\n qstate: AutoQuantizationState = \\\n parent_module._auto_quant_state # type: ignore[union-attr, assignment]\n if enable_logging:\n logger.debug(qstate)\n qstate.validate_cur_op(cur_module)\n _, args, kwargs = qstate.op_convert_before_hook(\n cur_module, args, kwargs, cur_module)\n # forward\n output = orig_module_call(self, *args, **kwargs)\n # after hooks\n output = qstate.op_convert_after_hook(\n cur_module, output, global_op_idx)\n qstate.mark_cur_op_complete(cur_module)\n\n elif hook_type is HookType.MODULE_IO_HOOKS:\n cur_qstate: AutoQuantizationState = cur_module._auto_quant_state\n if enable_logging:\n logger.debug(cur_qstate)\n\n cur_qstate.reset_to_new_call()\n\n # before hooks (TODO)\n # forward\n output = orig_module_call(self, *args, **kwargs)\n # after hooks\n output = cur_qstate.outputs_convert_hook(output)\n cur_qstate.validate_is_at_last_seen_idx()\n\n elif hook_type is HookType.ARG_DEQUANTS:\n # disabling torch function to prevent infinite recursion on\n # getset\n # TODO(future PR): handle more dtypes\n with torch._C.DisableTorchFunction():\n new_args = []\n for arg in args:\n if isinstance(arg, torch.Tensor) and arg.is_quantized:\n dequant = arg.dequantize().as_subclass(\n QuantizationConvertTensorProxy) # type: ignore[arg-type]\n new_args.append(dequant)\n else:\n new_args.append(arg)\n args = tuple(new_args)\n output = orig_module_call(self, *args, **kwargs)\n\n else:\n output = orig_module_call(self, *args, **kwargs)\n\n if enable_logging:\n logger.debug(\n f\"_patched_module_call {type(self)} \" +\n # f\"out {type(output)} \" +\n f\"dtype {output.dtype if isinstance(output, torch.Tensor) else None} \" +\n \"end\")\n logger.debug(f\"ending fqn {fqn}\\n\")\n return output\n finally:\n module_stack.pop()\n cur_module = old_module\n\n torch.nn.Module.__call__ = _patched_module_call\n torch.nn.Sequential.forward = _nn_sequential_patched_forward # type: ignore[assignment]\n\n try:\n global_op_idx[0] = 0\n\n needs_io_hooks = hasattr(self, '_auto_quant_state')\n\n # handle module input dtype conversions\n # TODO(implement)\n\n output = super().__call__(*new_args, **new_kwargs)\n\n # handle module output dtype 
conversions\n if needs_io_hooks:\n qstate = self._auto_quant_state\n assert isinstance(qstate, AutoQuantizationState)\n output = qstate.outputs_convert_hook(output)\n\n def unwrap_proxy(a):\n if isinstance(a, QuantizationConvertTensorProxy):\n a.__class__ = torch.Tensor # type: ignore[assignment]\n return a\n output = map_aggregate(output, unwrap_proxy)\n return output\n finally:\n torch.nn.Module.__call__ = orig_module_call\n torch.nn.Sequential.forward = orig_nn_sequential_forward # type: ignore[assignment]\n\n def rewrite_for_scripting(self):\n return auto_trace_rewriter.rewrite_for_scripting(self)\n\n pack_weights_for_functionals(module)\n attach_scale_zp_values_to_model(module)\n attach_op_convert_info_to_model(module)\n module.__class__ = QuantizationDispatchModule\n\n return module\n\n\n# AutoQuantizationState lives in parent module's _modules.\n# Currently, `torch.nn.Sequential`'s forward iterates over all\n# items in _modules. To avoid changing the meaning of the program, for\n# now we patch the forward to ignore our quantization state.\n# Note: this is a hackedy hack, before launching we should consider\n# checking the fix into `torch.nn.Sequential` to avoid the patch.\ndef _nn_sequential_patched_forward(cls, input):\n for module in cls:\n if not isinstance(module, AutoQuantizationState):\n input = module(input)\n return input\n",
"import torch\nimport functools\nfrom torch import Tensor\nfrom typing import Any, Callable, Optional, Tuple, Union, List\nfrom torch.utils._pytree import tree_flatten, tree_unflatten, _broadcast_to_and_flatten\nimport warnings\n\nin_dims_t = Union[int, Tuple]\nout_dims_t = Union[int, Tuple[int, ...]]\n\n# Checks that all args-to-be-batched have the same batch dim size\ndef _validate_and_get_batch_size(\n flat_in_dims: List[Optional[int]],\n flat_args: List) -> int:\n batch_sizes = [arg.size(in_dim) for in_dim, arg in zip(flat_in_dims, flat_args)\n if in_dim is not None]\n if batch_sizes and any([size != batch_sizes[0] for size in batch_sizes]):\n raise ValueError(\n f'vmap: Expected all tensors to have the same size in the mapped '\n f'dimension, got sizes {batch_sizes} for the mapped dimension')\n return batch_sizes[0]\n\ndef _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:\n if isinstance(batched_outputs, tuple):\n return len(batched_outputs)\n return 1\n\n# If value is a tuple, check it has length `num_elements`.\n# If value is not a tuple, make a tuple with `value` repeated `num_elements` times\ndef _as_tuple(value: Any, num_elements: int, error_message_lambda: Callable[[], str]) -> Tuple:\n if not isinstance(value, tuple):\n return (value,) * num_elements\n if len(value) != num_elements:\n raise ValueError(error_message_lambda())\n return value\n\n# Creates BatchedTensors for every Tensor in arg that should be batched.\n# Returns the (potentially) batched arguments and the batch_size.\ndef _create_batched_inputs(\n in_dims: in_dims_t, args: Tuple, vmap_level: int, func: Callable) -> Tuple[Tuple, int]:\n if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):\n raise ValueError(\n f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '\n f'expected `in_dims` to be int or a (potentially nested) tuple '\n f'matching the structure of inputs, got: {type(in_dims)}.')\n if len(args) == 0:\n raise ValueError(\n f'vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add '\n f'inputs, or you are trying to vmap over a function with no inputs. '\n f'The latter is unsupported.')\n\n flat_args, args_spec = tree_flatten(args)\n flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)\n if flat_in_dims is None:\n raise ValueError(\n f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '\n f'in_dims is not compatible with the structure of `inputs`. '\n f'in_dims has structure {tree_flatten(in_dims)[1]} but inputs '\n f'has structure {args_spec}.')\n\n for arg, in_dim in zip(flat_args, flat_in_dims):\n if not isinstance(in_dim, int) and in_dim is not None:\n raise ValueError(\n f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '\n f'Got in_dim={in_dim} for an input but in_dim must be either '\n f'an integer dimension or None.')\n if isinstance(in_dim, int) and not isinstance(arg, Tensor):\n raise ValueError(\n f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '\n f'Got in_dim={in_dim} for an input but the input is of type '\n f'{type(arg)}. 
We cannot vmap over non-Tensor arguments, '\n f'please use None as the respective in_dim')\n if in_dim is not None and (in_dim < 0 or in_dim >= arg.dim()):\n raise ValueError(\n f'vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): '\n f'Got in_dim={in_dim} for some input, but that input is a Tensor '\n f'of dimensionality {arg.dim()} so expected in_dim to satisfy '\n f'0 <= in_dim < {arg.dim()}.')\n\n batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)\n # See NOTE [Ignored _remove_batch_dim, _add_batch_dim]\n batched_inputs = [arg if in_dim is None else\n torch._add_batch_dim(arg, in_dim, vmap_level)\n for in_dim, arg in zip(flat_in_dims, flat_args)]\n return tree_unflatten(batched_inputs, args_spec), batch_size\n\n# Undos the batching (and any batch dimensions) associated with the `vmap_level`.\ndef _unwrap_batched(\n batched_outputs: Union[Tensor, Tuple[Tensor, ...]],\n out_dims: out_dims_t, vmap_level: int, batch_size: int, func: Callable,\n allow_none_pass_through: bool = False) -> Tuple:\n num_outputs = _num_outputs(batched_outputs)\n out_dims_as_tuple = _as_tuple(\n out_dims, num_outputs,\n lambda: f'vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must '\n f'have one dim per output (got {num_outputs} outputs) of {_get_name(func)}.')\n\n # NOTE [Ignored _remove_batch_dim, _add_batch_dim]\n # There is something wrong with our type bindings for functions that begin\n # with '_', see #40397.\n if isinstance(batched_outputs, Tensor):\n out_dim = out_dims_as_tuple[0]\n return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, out_dim) # type: ignore[return-value]\n if allow_none_pass_through:\n return tuple((torch._remove_batch_dim(out, vmap_level, batch_size, out_dim) if out is not None else None)\n for out, out_dim in zip(batched_outputs, out_dims_as_tuple))\n else:\n return tuple(torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)\n for out, out_dim in zip(batched_outputs, out_dims_as_tuple))\n\n# Checks that `fn` returned one or more Tensors and nothing else.\n# NB: A python function that return multiple arguments returns a single tuple,\n# so we are effectively checking that `outputs` is a single Tensor or a tuple of\n# Tensors.\ndef _validate_outputs(outputs: Any, func: Callable) -> None:\n if isinstance(outputs, Tensor):\n return\n if not isinstance(outputs, tuple):\n raise ValueError(f'vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return '\n f'Tensors, got type {type(outputs)} as the return.')\n for idx, output in enumerate(outputs):\n if isinstance(output, Tensor):\n continue\n raise ValueError(f'vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return '\n f'Tensors, got type {type(output)} for return {idx}.')\n\ndef _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:\n if isinstance(out_dims, int):\n return\n if not isinstance(out_dims, tuple) or \\\n not all([isinstance(out_dim, int) for out_dim in out_dims]):\n raise ValueError(\n f'vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be '\n f'an int or a tuple of int representing where in the outputs the '\n f'vmapped dimension should appear.')\n\ndef _get_name(func: Callable):\n if hasattr(func, '__name__'):\n return func.__name__\n\n # Not all callables have __name__, in fact, only static functions/methods do.\n # A callable created via functools.partial or an nn.Module, to name some\n # examples, don't have a __name__.\n return repr(func)\n\n# vmap(func)(inputs) wraps all Tensor inputs to be 
batched in BatchedTensors,\n# sends those into func, and then unwraps the output BatchedTensors. Operations\n# on BatchedTensors perform the batched operations that the user is asking for.\ndef vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:\n \"\"\"\n vmap is the vectorizing map. Returns a new function that maps `func` over some\n dimension of the inputs. Semantically, vmap pushes the map into PyTorch\n operations called by `func`, effectively vectorizing those operations.\n\n vmap is useful for handling batch dimensions: one can write a function `func`\n that runs on examples and then lift it to a function that can take batches of\n examples with `vmap(func)`. vmap can also be used to compute batched\n gradients when composed with autograd.\n\n .. note::\n We have moved development of vmap to\n `functorch. <https://github.com/pytorch/functorch>`_ functorch's\n vmap is able to arbitrarily compose with gradient computation\n and contains significant performance improvements.\n Please give that a try if that is what you're looking for.\n\n Furthermore, if you're interested in using vmap for your use case,\n please `contact us! <https://github.com/pytorch/pytorch/issues/42368>`_\n We're interested in gathering feedback from early adopters to inform\n the design.\n\n .. warning::\n torch.vmap is an experimental prototype that is subject to\n change and/or deletion. Please use at your own risk.\n\n Args:\n func (function): A Python function that takes one or more arguments.\n Must return one or more Tensors.\n in_dims (int or nested structure): Specifies which dimension of the\n inputs should be mapped over. `in_dims` should have a structure\n like the inputs. If the `in_dim` for a particular input is None,\n then that indicates there is no map dimension. Default: 0.\n out_dims (int or Tuple[int]): Specifies where the mapped dimension\n should appear in the outputs. If `out_dims` is a Tuple, then it should\n have one element per output. Default: 0.\n\n Returns:\n Returns a new \"batched\" function. It takes the same inputs as `func`,\n except each input has an extra dimension at the index specified by `in_dims`.\n It takes returns the same outputs as `func`, except each output has\n an extra dimension at the index specified by `out_dims`.\n\n .. warning:\n vmap works best with functional-style code. Please do not perform any\n side-effects in `func`, with the exception of in-place PyTorch operations.\n Examples of side-effects include mutating Python data structures and\n assigning values to variables not captured in `func`.\n\n One example of using `vmap` is to compute batched dot products. PyTorch\n doesn't provide a batched `torch.dot` API; instead of unsuccessfully\n rummaging through docs, use `vmap` to construct a new function.\n\n >>> torch.dot # [D], [D] -> []\n >>> batched_dot = torch.vmap(torch.dot) # [N, D], [N, D] -> [N]\n >>> x, y = torch.randn(2, 5), torch.randn(2, 5)\n >>> batched_dot(x, y)\n\n `vmap` can be helpful in hiding batch dimensions, leading to a simpler\n model authoring experience.\n\n >>> batch_size, feature_size = 3, 5\n >>> weights = torch.randn(feature_size, requires_grad=True)\n >>>\n >>> def model(feature_vec):\n >>> # Very simple linear model with activation\n >>> return feature_vec.dot(weights).relu()\n >>>\n >>> examples = torch.randn(batch_size, feature_size)\n >>> result = torch.vmap(model)(examples)\n\n `vmap` can also help vectorize computations that were previously difficult\n or impossible to batch. 
One example is higher-order gradient computation.\n The PyTorch autograd engine computes vjps (vector-Jacobian products).\n Computing a full Jacobian matrix for some function f: R^N -> R^N usually\n requires N calls to `autograd.grad`, one per Jacobian row. Using `vmap`,\n we can vectorize the whole computation, computing the Jacobian in a single\n call to `autograd.grad`.\n\n >>> # Setup\n >>> N = 5\n >>> f = lambda x: x ** 2\n >>> x = torch.randn(N, requires_grad=True)\n >>> y = f(x)\n >>> I_N = torch.eye(N)\n >>>\n >>> # Sequential approach\n >>> jacobian_rows = [torch.autograd.grad(y, x, v, retain_graph=True)[0]\n >>> for v in I_N.unbind()]\n >>> jacobian = torch.stack(jacobian_rows)\n >>>\n >>> # vectorized gradient computation\n >>> def get_vjp(v):\n >>> return torch.autograd.grad(y, x, v)\n >>> jacobian = torch.vmap(get_vjp)(I_N)\n\n .. note::\n vmap does not provide general autobatching or handle variable-length\n sequences out of the box.\n \"\"\"\n warnings.warn(\n 'Please use functorch.vmap instead of torch.vmap '\n '(https://github.com/pytorch/functorch). '\n 'We\'ve moved development on torch.vmap over to functorch; '\n 'functorch\'s vmap has a multitude of significant performance and '\n 'functionality improvements.',\n stacklevel=2)\n return _vmap(func, in_dims, out_dims)\n\n# A version of vmap but without the initial \"experimental prototype\" warning\ndef _vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0, allow_none_pass_through: bool = False) -> Callable:\n # The `allow_none_pass_through` argument is a temporary workaround that may be removed.\n # Currently it enables us to wrap the call in `autograd.grad` to the autograd engine,\n # which may return None if any of the inputs are unused. See the issue discussing this:\n # https://github.com/facebookresearch/functorch/issues/159.\n @functools.wraps(func)\n def wrapped(*args):\n _check_out_dims_is_int_or_int_tuple(out_dims, func)\n vmap_level = torch._C._vmapmode_increment_nesting()\n try:\n batched_inputs, batch_size = _create_batched_inputs(in_dims, args, vmap_level, func)\n batched_outputs = func(*batched_inputs)\n if not allow_none_pass_through:\n _validate_outputs(batched_outputs, func)\n return _unwrap_batched(batched_outputs, out_dims, vmap_level, batch_size, func,\n allow_none_pass_through=allow_none_pass_through)\n finally:\n torch._C._vmapmode_decrement_nesting()\n return wrapped\n",
"import torch.fx\nimport torchvision.models as models\nfrom torch.fx.experimental.fx2trt.fx2trt import TRTInterpreter, InputTensorSpec, TRTModule\nfrom torch.ao.quantization.quantize_fx import prepare_fx, convert_fx\nimport torch.fx.experimental.fx_acc.acc_tracer as acc_tracer\nimport copy\nfrom torch.fx.passes import shape_prop\nfrom torch.fx.experimental.normalize import NormalizeArgs\nimport tensorrt as trt\n\nrn18 = models.resnet18().eval()\n\ndef build_fp16_trt(rn18):\n rn18 = copy.deepcopy(rn18)\n rn18 = acc_tracer.trace(rn18, [torch.randn(1, 3, 224, 224)])\n interp = TRTInterpreter(\n rn18, [InputTensorSpec(torch.Size([3, 224, 224]), torch.float, has_batch_dim=False)])\n interpreter_result = interp.run(fp16_mode=True)\n return TRTModule(interpreter_result.engine, interpreter_result.input_names, interpreter_result.output_names)\n\[email protected]_grad()\ndef build_int8_trt(rn18):\n rn18 = copy.deepcopy(rn18)\n data = torch.randn(1, 3, 224, 224)\n # data = torch.randn(1, 32)\n # data = torch.randn(1, 64, 10, 10)\n # TensorRT only supports symmetric quantization\n qconfig = torch.ao.quantization.QConfig(\n activation=torch.ao.quantization.observer.HistogramObserver.with_args(\n qscheme=torch.per_tensor_symmetric, dtype=torch.qint8\n ),\n # weight=torch.ao.quantization.default_weight_observer\n # uncomment to check per channel quant works\n weight=torch.quantization.default_per_channel_weight_observer\n )\n prepared = prepare_fx(rn18, {\"\": qconfig})\n for _ in range(10):\n prepared(data)\n quantized_rn18 = convert_fx(prepared, is_reference=True)\n ref_res = quantized_rn18(data)\n print(\"quantized model:\", quantized_rn18)\n\n quantized_rn18 = acc_tracer.trace(quantized_rn18, [data]) # type: ignore[assignment]\n interp = TRTInterpreter(\n quantized_rn18,\n [InputTensorSpec(torch.Size([-1, *data.shape[1:]]), torch.float,\n shape_ranges=[((1, 3, 224, 224), (5, 3, 224, 224), (10, 3, 224, 224))], has_batch_dim=True)],\n explicit_batch_dimension=True, explicit_precision=True, logger_level=trt.Logger.VERBOSE)\n interpreter_result = interp.run(fp16_mode=False, int8_mode=True)\n trt_mod = TRTModule(interpreter_result.engine, interpreter_result.input_names, interpreter_result.output_names)\n trt_res = trt_mod(data.cuda())\n print(\"explicit quant result diff max\", torch.max(ref_res - trt_res.cpu()))\n return trt_mod\n\[email protected]_grad()\ndef build_int8_trt_implicit_quant(rn18):\n rn18 = copy.deepcopy(rn18)\n data = torch.randn(1, 3, 224, 224)\n # Quantization\n qconfig = torch.ao.quantization.QConfig(\n activation=torch.ao.quantization.observer.HistogramObserver.with_args(\n qscheme=torch.per_tensor_symmetric, reduce_range=True\n ),\n weight=torch.ao.quantization.default_per_channel_weight_observer\n )\n prepared = prepare_fx(rn18, {\"\": qconfig})\n for _ in range(10):\n prepared(data)\n quantized_rn18 = convert_fx(prepared)\n ref_res = quantized_rn18(data)\n\n # Build trt int8 model\n traced_rn18 = torch.fx.symbolic_trace(quantized_rn18)\n shape_prop.ShapeProp(traced_rn18).propagate(data)\n traced_rn18 = NormalizeArgs(traced_rn18).transform()\n interp = TRTInterpreter(traced_rn18, InputTensorSpec.from_tensors([data]), logger_level=trt.Logger.VERBOSE)\n interpreter_result = interp.run(fp16_mode=False, int8_mode=True, strict_type_constraints=True)\n trt_mod = TRTModule(interpreter_result.engine, interpreter_result.input_names, interpreter_result.output_names)\n trt_res = trt_mod(data.cuda())\n print(\"implicit quant result diff max\", torch.max(ref_res - trt_res.cpu()))\n return 
trt_mod\n\nclass M(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.linear = torch.nn.Linear(32, 46)\n # self.conv = torch.nn.Conv2d(3, 3, 3, padding=1)\n\n def forward(self, x):\n # out = self.conv(x)\n out = self.linear(x)\n # out = torch.nn.functional.relu(out)\n # out += x\n # out += out\n # out = torch.nn.functional.relu(out)\n return out\n\n# rn18 = M().eval()\n# rn18 = rn18.layer1\nint8_trt = build_int8_trt(rn18)\nimplicit_int8_trt = build_int8_trt_implicit_quant(rn18)\nfp16_trt = build_fp16_trt(rn18)\nx = torch.randn(5, 3, 224, 224, device=\"cuda\")\n# x = torch.randn(1, 32, device=\"cuda\")\nrn18 = rn18.cuda()\n\nimport time\nNITER = 100\n\ntorch.cuda.synchronize()\ns = time.time()\nfor _ in range(NITER):\n fp16_trt(x)\n torch.cuda.synchronize()\nprint('trt fp16 time (ms/iter)', (time.time() - s) / NITER * 1000)\n\ntorch.cuda.synchronize()\ns = time.time()\nfor _ in range(NITER):\n int8_trt(x)\n torch.cuda.synchronize()\nprint('trt int8 time (ms/iter)', (time.time() - s) / NITER * 1000)\n\ntorch.cuda.synchronize()\ns = time.time()\nfor _ in range(NITER):\n implicit_int8_trt(x)\n torch.cuda.synchronize()\nprint('trt implicit int8 time (ms/iter)', (time.time() - s) / NITER * 1000)\n\ntorch.cuda.synchronize()\ns = time.time()\nfor _ in range(NITER):\n rn18(x)\n torch.cuda.synchronize()\nprint('PyTorch time (CUDA) (ms/iter)', (time.time() - s) / NITER * 1000)\n\ntorch.cuda.synchronize()\ns = time.time()\nrn18 = rn18.cpu()\nx = x.cpu()\nfor _ in range(NITER):\n rn18(x)\nprint('PyTorch time (CPU) (ms/iter)', (time.time() - s) / NITER * 1000)\n",
"# Owner(s): [\"module: unknown\"]\n\nimport torch\nimport torch.nn as nn\n\n\nclass Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.linear = nn.Linear(20, 20)\n\n def forward(self, input):\n out = self.linear(input[:, 10:30])\n return out.sum()\n\n\ndef main():\n data = torch.randn(10, 50).cuda()\n model = Model().cuda()\n optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)\n for i in range(10):\n optimizer.zero_grad()\n loss = model(data)\n loss.backward()\n optimizer.step()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"torch.ones",
"torch.nn.utils.parametrize.is_parametrized",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.cuda.is_available",
"torch.nn.BatchNorm2d",
"torch.device",
"torch.nn.ReLU"
],
[
"torch.randn"
],
[
"torch.nn.functional.batch_norm",
"torch.nn.functional.softmax",
"numpy.expand_dims",
"torch.randint",
"torch.ge",
"torch.max",
"torch._C._debug_set_fusion_group_inlining",
"torch.zeros",
"torch._C._jit_set_profiling_mode",
"torch.cat",
"torch._C._jit_set_texpr_reductions_enabled",
"torch.lt",
"torch.rand_like",
"torch.jit.last_executed_optimized_graph",
"torch._C._jit_override_can_fuse_on_gpu",
"torch.le",
"numpy.mean",
"torch.cuda.is_available",
"torch.allclose",
"torch._C._jit_set_te_must_use_llvm_cpu",
"torch.pow",
"torch.jit.script",
"torch.ones",
"torch.jit.trace",
"torch.add",
"numpy.clip",
"torch.eq",
"torch.randn",
"torch.from_numpy",
"torch._C._jit_texpr_fuser_enabled",
"torch._C._jit_set_texpr_fuser_enabled",
"torch.tensor",
"numpy.full",
"torch.mul",
"torch._C._jit_override_can_fuse_on_cpu",
"torch.rand",
"torch.nn.functional.relu",
"torch.arange",
"torch._C._jit_get_num_profiled_runs",
"torch.gt",
"numpy.zeros",
"torch.testing.assert_close",
"torch._C._jit_get_te_generate_block_code",
"torch._C._jit_can_fuse_on_cpu",
"torch.full",
"torch._C._jit_set_te_generate_block_code",
"torch._C._jit_texpr_fallback_allowed",
"torch.min",
"torch.unsqueeze",
"torch._C._jit_can_fuse_on_gpu",
"numpy.testing.assert_allclose",
"numpy.array",
"torch.testing._internal.common_utils.run_tests",
"torch._C._debug_get_fusion_group_inlining",
"torch.as_strided",
"torch.ne",
"numpy.maximum",
"torch._C._jit_set_profiling_executor",
"torch.nn.functional.log_softmax",
"torch.testing._internal.common_utils.num_profiled_runs",
"torch.manual_seed",
"torch.sub",
"numpy.ones",
"torch._C._jit_texpr_set_fallback_allowed",
"torch.erf",
"torch._C._jit_get_te_must_use_llvm_cpu",
"torch.chunk",
"torch.clamp",
"numpy.array_split",
"numpy.empty"
],
[
"torch.nn.utils.parametrize.register_parametrization",
"torch.nn.utils.parametrize.remove_parametrizations",
"torch.ones",
"torch.no_grad",
"torch.ones_like"
],
[
"torch.jit.save",
"torch.jit.load",
"torch.empty_like",
"torch.ones",
"torch.utils.bundled_inputs.augment_model_with_bundled_inputs",
"torch.zeros",
"torch.utils.bundled_inputs.bundle_randn",
"torch.randn",
"torch.utils.bundled_inputs.bundle_large_tensor",
"torch.utils.bundled_inputs.augment_many_model_functions_with_bundled_inputs",
"torch.tensor",
"torch.testing._internal.common_utils.run_tests",
"torch.utils.bundled_inputs._get_bundled_inputs_attributes_and_methods"
],
[
"torch.quantization.fuse_modules",
"torch.quantization.propagate_qconfig_",
"torch.quantization.prepare",
"torch.quantization.convert"
],
[
"torch.testing._internal.common_utils.run_tests"
],
[
"torch.onnx.symbolic_helper._get_const",
"torch.onnx.symbolic_helper.parse_args",
"torch.onnx.symbolic_helper._reducesum_helper",
"torch.onnx.symbolic_opset9.unbind",
"torch.onnx.symbolic_helper._sort_helper",
"torch.onnx.symbolic_helper._get_tensor_dim_size",
"torch.onnx.symbolic_opset10.flip",
"torch.onnx.symbolic_opset9.cat",
"torch.onnx.symbolic_helper._maybe_get_const",
"torch.onnx.symbolic_helper._flatten_helper",
"torch.onnx.symbolic_opset9.remainder",
"torch.onnx.symbolic_helper._is_fp",
"torch.onnx.symbolic_helper.scalar_type_to_onnx.index",
"torch.onnx.symbolic_helper._is_packed_list",
"torch.onnx.symbolic_helper._unsqueeze_helper",
"torch.ones",
"torch.onnx.symbolic_helper._is_split_static",
"torch.onnx.symbolic_opset9.masked_fill",
"torch.onnx.symbolic_helper.__interpolate_helper",
"torch.onnx.symbolic_helper._if_scalar_type_as",
"torch.onnx.symbolic_helper._size_helper",
"torch.onnx.symbolic_helper._onnx_unsupported",
"torch.onnx.utils._add_output_to_block",
"torch.onnx.symbolic_opset9.split",
"torch.onnx.utils._add_input_to_block",
"torch.tensor",
"torch.onnx.symbolic_helper._unpack_list",
"torch.onnx.symbolic_helper._topk_helper",
"torch.onnx.utils._add_block",
"torch.arange",
"torch.onnx.symbolic_helper._unimplemented",
"torch.onnx.symbolic_opset9.expand",
"torch.onnx.symbolic_opset9.__getitem_",
"torch.LongTensor",
"torch.onnx.symbolic_helper._maybe_get_scalar",
"torch.onnx.symbolic_opset9.index",
"torch.onnx.symbolic_helper._is_scalar_list",
"torch.onnx.symbolic_opset9.unused",
"torch.onnx.symbolic_helper._reshape_helper",
"torch.onnx.symbolic_opset9.stack",
"torch.onnx.symbolic_helper._index_fill_reshape_helper",
"torch.onnx.symbolic_helper._is_value",
"torch.onnx.symbolic_helper._is_tensor_list",
"torch.onnx.symbolic_helper._parse_arg",
"torch.onnx.symbolic_opset9.expand_as",
"torch.onnx.symbolic_helper._slice_helper",
"torch.onnx.symbolic_helper._avgpool_helper",
"torch.onnx.symbolic_helper._squeeze_helper",
"torch.onnx.symbolic_helper._get_tensor_rank",
"torch.onnx.symbolic_helper._arange_cast_helper",
"torch.onnx.symbolic_helper._is_list",
"torch.onnx.symbolic_opset9.add",
"torch.onnx.symbolic_helper._interpolate_helper",
"torch.onnx.symbolic_helper._is_none",
"torch.onnx.symbolic_opset9.nonzero"
],
[
"torch.testing._internal.distributed.rpc_utils.generate_tests",
"torch.distributed.is_available",
"torch.testing._internal.common_utils.run_tests",
"torch.cuda.is_available"
],
[
"torch.distributed._sharding_spec.ShardMetadata"
],
[
"torch._C.DisableTorchFunction",
"torch.fx.node.map_aggregate"
],
[
"torch.utils._pytree.tree_unflatten",
"torch.utils._pytree.tree_flatten",
"torch._remove_batch_dim",
"torch._C._vmapmode_decrement_nesting",
"torch._C._vmapmode_increment_nesting",
"torch._add_batch_dim",
"torch.utils._pytree._broadcast_to_and_flatten"
],
[
"torch.fx.experimental.fx_acc.acc_tracer.trace",
"torch.fx.passes.shape_prop.ShapeProp",
"torch.ao.quantization.quantize_fx.convert_fx",
"torch.fx.experimental.fx2trt.fx2trt.InputTensorSpec.from_tensors",
"torch.fx.experimental.normalize.NormalizeArgs",
"torch.ao.quantization.quantize_fx.prepare_fx",
"torch.fx.experimental.fx2trt.fx2trt.TRTModule"
],
[
"torch.nn.Linear",
"torch.randn"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
RobinRojowiec/intent-recognition-in-doctor-patient-interviews | [
"b91c7a9f3ad70edd0f39b56e3219f48d1fcf2078"
] | [
"models/siamese_neural_network.py"
] | [
"import random\n\nimport torch\nimport torch.nn as nn\n\nfrom models.cnn_layer import CNNLayer\nfrom utility.model_parameter import ModelParameter, Configuration\n\n\nclass SiameseNeuralNetwork(nn.Module):\n def __init__(self, config: Configuration, label_count=64, device=torch.device('cpu'), *args, **kwargs):\n super(SiameseNeuralNetwork, self).__init__()\n\n # set parameters\n self.max_length = config.get_int(ModelParameter.MAX_LENGTH)\n self.device = device\n\n # create and initialize layers\n self.cnn_layer = CNNLayer(config)\n self.distance_measure = nn.CosineSimilarity()\n\n def distance(self, a, b):\n return self.distance_measure(a, b)\n\n def get_output_dim(self):\n return self.cnn_layer.get_output_length()\n\n def forward(self, sample, previous_classes, positions, previous_sample, sample_pos, *sample_neg, **kwargs):\n n_negative = len(sample_neg)\n selected_negative = sample_neg[random.randint(0, n_negative - 1)]\n return self.compare(sample, sample_pos, mode=kwargs[\"mode\"]), self.compare(sample, selected_negative,\n mode=kwargs[\"mode\"])\n\n def get_features(self, sample):\n return self.cnn_layer(sample)\n\n def compare(self, sample_1, sample_2, mode=\"train\", **kwargs):\n encoded_sample_1 = self.cnn_layer(sample_1)\n encoded_sample_2 = self.cnn_layer(sample_2)\n\n return self.distance(encoded_sample_1, encoded_sample_2)\n"
] | [
[
"torch.device",
"torch.nn.CosineSimilarity"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ujjaldas132/models | [
"e13441ed200ce1bb204977e731508748bd0e0d14",
"e13441ed200ce1bb204977e731508748bd0e0d14",
"befbe0f9fe02d6bc1efb1c462689d069dae23af1"
] | [
"official/recommendation/ncf_test.py",
"official/modeling/model_training_utils_test.py",
"official/modeling/tf_utils.py"
] | [
"# Copyright 2018 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests NCF.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport unittest\n\nimport mock\nimport numpy as np\nimport tensorflow as tf\n\nfrom official.recommendation import constants as rconst\nfrom official.recommendation import data_pipeline\nfrom official.recommendation import neumf_model\nfrom official.recommendation import ncf_common\nfrom official.recommendation import ncf_estimator_main\nfrom official.recommendation import ncf_keras_main\nfrom official.utils.misc import keras_utils\nfrom official.utils.testing import integration\n\nfrom tensorflow.python.eager import context # pylint: disable=ungrouped-imports\n\n\nNUM_TRAIN_NEG = 4\n\n\nclass NcfTest(tf.test.TestCase):\n\n @classmethod\n def setUpClass(cls): # pylint: disable=invalid-name\n super(NcfTest, cls).setUpClass()\n ncf_common.define_ncf_flags()\n\n def setUp(self):\n self.top_k_old = rconst.TOP_K\n self.num_eval_negatives_old = rconst.NUM_EVAL_NEGATIVES\n rconst.NUM_EVAL_NEGATIVES = 2\n\n def tearDown(self):\n rconst.NUM_EVAL_NEGATIVES = self.num_eval_negatives_old\n rconst.TOP_K = self.top_k_old\n\n @unittest.skipIf(keras_utils.is_v2_0(), \"TODO(b/136018594)\")\n def get_hit_rate_and_ndcg(self, predicted_scores_by_user, items_by_user,\n top_k=rconst.TOP_K, match_mlperf=False):\n rconst.TOP_K = top_k\n rconst.NUM_EVAL_NEGATIVES = predicted_scores_by_user.shape[1] - 1\n batch_size = items_by_user.shape[0]\n\n users = np.repeat(np.arange(batch_size)[:, np.newaxis],\n rconst.NUM_EVAL_NEGATIVES + 1, axis=1)\n users, items, duplicate_mask = \\\n data_pipeline.BaseDataConstructor._assemble_eval_batch(\n users, items_by_user[:, -1:], items_by_user[:, :-1], batch_size)\n\n g = tf.Graph()\n with g.as_default():\n logits = tf.convert_to_tensor(\n predicted_scores_by_user.reshape((-1, 1)), tf.float32)\n softmax_logits = tf.concat([tf.zeros(logits.shape, dtype=logits.dtype),\n logits], axis=1)\n duplicate_mask = tf.convert_to_tensor(duplicate_mask, tf.float32)\n\n metric_ops = neumf_model._get_estimator_spec_with_metrics(\n logits=logits, softmax_logits=softmax_logits,\n duplicate_mask=duplicate_mask, num_training_neg=NUM_TRAIN_NEG,\n match_mlperf=match_mlperf).eval_metric_ops\n\n hr = metric_ops[rconst.HR_KEY]\n ndcg = metric_ops[rconst.NDCG_KEY]\n\n init = [tf.compat.v1.global_variables_initializer(),\n tf.compat.v1.local_variables_initializer()]\n\n with self.session(graph=g) as sess:\n sess.run(init)\n return sess.run([hr[1], ndcg[1]])\n\n def test_hit_rate_and_ndcg(self):\n # Test with no duplicate items\n predictions = np.array([\n [2., 0., 1.], # In top 2\n [1., 0., 2.], # In top 1\n [2., 1., 0.], # In top 3\n [3., 4., 2.] 
# In top 3\n ])\n items = np.array([\n [2, 3, 1],\n [3, 1, 2],\n [2, 1, 3],\n [1, 3, 2],\n ])\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n # Test with duplicate items. In the MLPerf case, we treat the duplicates as\n # a single item. Otherwise, we treat the duplicates as separate items.\n predictions = np.array([\n [2., 2., 3., 1.], # In top 4. MLPerf: In top 3\n [1., 0., 2., 3.], # In top 1. MLPerf: In top 1\n [2., 3., 2., 0.], # In top 4. MLPerf: In top 3\n [2., 4., 2., 3.] # In top 2. MLPerf: In top 2\n ])\n items = np.array([\n [2, 2, 3, 1],\n [2, 3, 4, 1],\n [2, 3, 2, 1],\n [3, 2, 1, 4],\n ])\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(5)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 1,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 1 / 4)\n self.assertAlmostEqual(ndcg, 1 / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 2,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 2 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 3,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n hr, ndcg = self.get_hit_rate_and_ndcg(predictions, items, 4,\n match_mlperf=True)\n self.assertAlmostEqual(hr, 4 / 4)\n self.assertAlmostEqual(ndcg, (1 + math.log(2) / math.log(3) +\n 2 * math.log(2) / math.log(4)) / 4)\n\n _BASE_END_TO_END_FLAGS = ['-batch_size', '1044', '-train_epochs', '1']\n\n @unittest.skipIf(keras_utils.is_v2_0(), \"TODO(b/136018594)\")\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_estimator(self):\n integration.run_synthetic(\n ncf_estimator_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS)\n\n @unittest.skipIf(keras_utils.is_v2_0(), 
\"TODO(b/136018594)\")\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_estimator_mlperf(self):\n integration.run_synthetic(\n ncf_estimator_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-ml_perf', 'True'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n def test_end_to_end_keras_no_dist_strat(self):\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS +\n ['-distribution_strategy', 'off'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_dist_strat(self):\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '0'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_dist_strat_ctl(self):\n flags = (self._BASE_END_TO_END_FLAGS +\n ['-num_gpus', '0'] +\n ['-keras_use_ctl', 'True'])\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=flags)\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_1_gpu_dist_strat(self):\n if context.num_gpus() < 1:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(1, context.num_gpus()))\n\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '1'])\n\n @mock.patch.object(rconst, \"SYNTHETIC_BATCHES_PER_EPOCH\", 100)\n @unittest.skipUnless(keras_utils.is_v2_0(), 'TF 2.0 only test.')\n def test_end_to_end_keras_2_gpu(self):\n if context.num_gpus() < 2:\n self.skipTest(\n \"{} GPUs are not available for this test. {} GPUs are available\".\n format(2, context.num_gpus()))\n\n integration.run_synthetic(\n ncf_keras_main.main, tmp_root=self.get_temp_dir(),\n extra_flags=self._BASE_END_TO_END_FLAGS + ['-num_gpus', '2'])\n\nif __name__ == \"__main__\":\n tf.test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for official.modeling.training.model_training_utils.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow as tf\n\nfrom tensorflow.python.distribute import combinations\nfrom tensorflow.python.distribute import strategy_combinations\nfrom official.modeling import model_training_utils\n\n\ndef eager_strategy_combinations():\n return combinations.combine(\n distribution=[\n strategy_combinations.default_strategy,\n strategy_combinations.tpu_strategy,\n strategy_combinations.one_device_strategy_gpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.mirrored_strategy_with_two_gpus,\n ],\n mode='eager',\n )\n\n\ndef eager_gpu_strategy_combinations():\n return combinations.combine(\n distribution=[\n strategy_combinations.default_strategy,\n strategy_combinations.one_device_strategy_gpu,\n strategy_combinations.mirrored_strategy_with_gpu_and_cpu,\n strategy_combinations.mirrored_strategy_with_two_gpus,\n ],\n mode='eager',\n )\n\n\ndef create_fake_data_input_fn(batch_size, features_shape, num_classes):\n \"\"\"Creates a dummy input function with the given feature and label shapes.\n\n Args:\n batch_size: integer.\n features_shape: list[int]. Feature shape for an individual example.\n num_classes: integer. 
Number of labels.\n\n Returns:\n An input function that is usable in the executor.\n \"\"\"\n\n def _input_fn():\n \"\"\"An input function for generating fake data.\"\"\"\n features = np.random.rand(64, *features_shape)\n labels = np.random.randint(2, size=[64, num_classes])\n # Convert the inputs to a Dataset.\n dataset = tf.data.Dataset.from_tensor_slices((features, labels))\n\n def _assign_dtype(features, labels):\n features = tf.cast(features, tf.float32)\n labels = tf.cast(labels, tf.float32)\n return features, labels\n\n # Shuffle, repeat, and batch the examples.\n dataset = dataset.map(_assign_dtype)\n dataset = dataset.shuffle(64).repeat()\n dataset = dataset.batch(batch_size, drop_remainder=True)\n dataset = dataset.prefetch(buffer_size=64)\n return dataset\n\n return _input_fn\n\n\ndef create_model_fn(input_shape, num_classes, use_float16=False):\n\n def _model_fn():\n \"\"\"A one-layer softmax model suitable for testing.\"\"\"\n input_layer = tf.keras.layers.Input(shape=input_shape)\n x = tf.keras.layers.Dense(num_classes, activation='relu')(input_layer)\n output_layer = tf.keras.layers.Dense(num_classes, activation='softmax')(x)\n sub_model = tf.keras.models.Model(input_layer, x, name='sub_model')\n model = tf.keras.models.Model(input_layer, output_layer, name='model')\n model.add_metric(\n tf.reduce_mean(input_layer), name='mean_input', aggregation='mean')\n model.optimizer = tf.keras.optimizers.SGD(learning_rate=0.1, momentum=0.9)\n if use_float16:\n model.optimizer = (\n tf.keras.mixed_precision.experimental.LossScaleOptimizer(\n model.optimizer, loss_scale='dynamic'))\n return model, sub_model\n\n return _model_fn\n\n\ndef metric_fn():\n \"\"\"Gets a tf.keras metric object.\"\"\"\n return tf.keras.metrics.CategoricalAccuracy(name='accuracy', dtype=tf.float32)\n\n\ndef summaries_with_matching_keyword(keyword, summary_dir):\n \"\"\"Yields summary protos matching given keyword from event file.\"\"\"\n event_paths = tf.io.gfile.glob(os.path.join(summary_dir, 'events*'))\n for event in tf.compat.v1.train.summary_iterator(event_paths[-1]):\n if event.summary is not None:\n for value in event.summary.value:\n if keyword in value.tag:\n tf.compat.v1.logging.error(event)\n yield event.summary\n\n\ndef check_eventfile_for_keyword(keyword, summary_dir):\n \"\"\"Checks event files for the keyword.\"\"\"\n return any(summaries_with_matching_keyword(keyword, summary_dir))\n\n\nclass ModelTrainingUtilsTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(ModelTrainingUtilsTest, self).setUp()\n self._input_fn = create_fake_data_input_fn(\n batch_size=8, features_shape=[128], num_classes=3)\n self._model_fn = create_model_fn(input_shape=[128], num_classes=3)\n\n def run_training(self, distribution, model_dir, steps_per_loop, run_eagerly):\n model_training_utils.run_customized_training_loop(\n strategy=distribution,\n model_fn=self._model_fn,\n loss_fn=tf.keras.losses.categorical_crossentropy,\n model_dir=model_dir,\n steps_per_epoch=20,\n steps_per_loop=steps_per_loop,\n epochs=2,\n train_input_fn=self._input_fn,\n eval_input_fn=self._input_fn,\n eval_steps=10,\n init_checkpoint=None,\n metric_fn=metric_fn,\n custom_callbacks=None,\n run_eagerly=run_eagerly)\n\n @combinations.generate(eager_strategy_combinations())\n def test_train_eager_single_step(self, distribution):\n model_dir = self.get_temp_dir()\n if isinstance(distribution, tf.distribute.experimental.TPUStrategy):\n with self.assertRaises(ValueError):\n self.run_training(\n distribution, model_dir, 
steps_per_loop=1, run_eagerly=True)\n else:\n self.run_training(\n distribution, model_dir, steps_per_loop=1, run_eagerly=True)\n\n @combinations.generate(eager_gpu_strategy_combinations())\n def test_train_eager_mixed_precision(self, distribution):\n model_dir = self.get_temp_dir()\n policy = tf.keras.mixed_precision.experimental.Policy('mixed_float16')\n tf.keras.mixed_precision.experimental.set_policy(policy)\n self._model_fn = create_model_fn(\n input_shape=[128], num_classes=3, use_float16=True)\n self.run_training(\n distribution, model_dir, steps_per_loop=1, run_eagerly=True)\n\n @combinations.generate(eager_strategy_combinations())\n def test_train_check_artifacts(self, distribution):\n model_dir = self.get_temp_dir()\n self.run_training(\n distribution, model_dir, steps_per_loop=10, run_eagerly=False)\n\n # Two checkpoints should be saved after two epochs.\n self.assertNotEmpty(tf.io.gfile.glob(os.path.join(model_dir, 'ctl_step_*')))\n self.assertNotEmpty(\n tf.io.gfile.glob(os.path.join(model_dir, 'training_summary*')))\n\n # Loss and accuracy values should be written into summaries.\n self.assertTrue(\n check_eventfile_for_keyword('loss',\n os.path.join(model_dir, 'summaries/train')))\n self.assertTrue(\n check_eventfile_for_keyword('accuracy',\n os.path.join(model_dir, 'summaries/train')))\n self.assertTrue(\n check_eventfile_for_keyword('mean_input',\n os.path.join(model_dir, 'summaries/train')))\n self.assertTrue(\n check_eventfile_for_keyword('accuracy',\n os.path.join(model_dir, 'summaries/eval')))\n self.assertTrue(\n check_eventfile_for_keyword('mean_input',\n os.path.join(model_dir, 'summaries/eval')))\n\n\nif __name__ == '__main__':\n assert tf.version.VERSION.startswith('2.')\n tf.test.main()\n",
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Common TF utilities.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport six\nimport tensorflow as tf\n\nfrom official.modeling import activations\n\n\ndef pack_inputs(inputs):\n \"\"\"Pack a list of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. if any input is None, replace it with a special constant\n tensor.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if x is None:\n outputs.append(tf.constant(0, shape=[], dtype=tf.int32))\n else:\n outputs.append(x)\n return tuple(outputs)\n\n\ndef unpack_inputs(inputs):\n \"\"\"unpack a tuple of `inputs` tensors to a tuple.\n\n Args:\n inputs: a list of tensors.\n\n Returns:\n a tuple of tensors. if any input is a special constant tensor, replace it\n with None.\n \"\"\"\n inputs = tf.nest.flatten(inputs)\n outputs = []\n for x in inputs:\n if is_special_none_tensor(x):\n outputs.append(None)\n else:\n outputs.append(x)\n x = tuple(outputs)\n\n # To trick the very pointless 'unbalanced-tuple-unpacking' pylint check\n # from triggering.\n if len(x) == 1:\n return x[0]\n return tuple(outputs)\n\n\ndef is_special_none_tensor(tensor):\n \"\"\"Checks if a tensor is a special None Tensor.\"\"\"\n return tensor.shape.ndims == 0 and tensor.dtype == tf.int32\n\n\n# TODO(hongkuny): consider moving custom string-map lookup to keras api.\ndef get_activation(identifier):\n \"\"\"Maps a identifier to a Python function, e.g., \"relu\" => `tf.nn.relu`.\n\n It checks string first and if it is one of customized activation not in TF,\n the corresponding activation will be returned. For non-customized activation\n names and callable identifiers, always fallback to tf.keras.activations.get.\n\n Args:\n identifier: String name of the activation function or callable.\n\n Returns:\n A Python function corresponding to the activation function.\n \"\"\"\n if isinstance(identifier, six.string_types):\n name_to_fn = {\n \"gelu\": activations.gelu,\n \"custom_swish\": activations.swish,\n }\n identifier = str(identifier).lower()\n if identifier in name_to_fn:\n return tf.keras.activations.get(name_to_fn[identifier])\n return tf.keras.activations.get(identifier)\n\n\ndef get_shape_list(tensor, expected_rank=None, name=None):\n \"\"\"Returns a list of the shape of tensor, preferring static dimensions.\n\n Args:\n tensor: A tf.Tensor object to find the shape of.\n expected_rank: (optional) int. The expected rank of `tensor`. If this is\n specified and the `tensor` has a different rank, and exception will be\n thrown.\n name: Optional name of the tensor for the error message.\n\n Returns:\n A list of dimensions of the shape of tensor. 
All static dimensions will\n be returned as python integers, and dynamic dimensions will be returned\n as tf.Tensor scalars.\n \"\"\"\n if expected_rank is not None:\n assert_rank(tensor, expected_rank, name)\n\n shape = tensor.shape.as_list()\n\n non_static_indexes = []\n for (index, dim) in enumerate(shape):\n if dim is None:\n non_static_indexes.append(index)\n\n if not non_static_indexes:\n return shape\n\n dyn_shape = tf.shape(tensor)\n for index in non_static_indexes:\n shape[index] = dyn_shape[index]\n return shape\n\n\ndef assert_rank(tensor, expected_rank, name=None):\n \"\"\"Raises an exception if the tensor rank is not of the expected rank.\n\n Args:\n tensor: A tf.Tensor to check the rank of.\n expected_rank: Python integer or list of integers, expected rank.\n name: Optional name of the tensor for the error message.\n\n Raises:\n ValueError: If the expected shape doesn't match the actual shape.\n \"\"\"\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n raise ValueError(\n \"For the tensor `%s`, the actual tensor rank `%d` (shape = %s) is not \"\n \"equal to the expected tensor rank `%s`\" %\n (name, actual_rank, str(tensor.shape), str(expected_rank)))\n"
] | [
[
"tensorflow.convert_to_tensor",
"tensorflow.Graph",
"tensorflow.python.eager.context.num_gpus",
"tensorflow.compat.v1.local_variables_initializer",
"tensorflow.zeros",
"numpy.arange",
"tensorflow.test.main",
"tensorflow.compat.v1.global_variables_initializer",
"numpy.array"
],
[
"tensorflow.keras.mixed_precision.experimental.LossScaleOptimizer",
"tensorflow.keras.layers.Input",
"tensorflow.keras.models.Model",
"tensorflow.reduce_mean",
"tensorflow.keras.mixed_precision.experimental.Policy",
"tensorflow.python.distribute.combinations.combine",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.optimizers.SGD",
"tensorflow.test.main",
"tensorflow.keras.mixed_precision.experimental.set_policy",
"tensorflow.compat.v1.train.summary_iterator",
"tensorflow.cast",
"tensorflow.keras.layers.Dense",
"tensorflow.compat.v1.logging.error",
"numpy.random.rand",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.version.VERSION.startswith",
"numpy.random.randint"
],
[
"tensorflow.nest.flatten",
"tensorflow.constant",
"tensorflow.shape",
"tensorflow.keras.activations.get"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |