repo_name
stringlengths 6
130
| hexsha
sequence | file_path
sequence | code
sequence | apis
sequence | possible_versions
list |
---|---|---|---|---|---|
piotrmarciniak1998/MachineLearningCourse | [
"7f587145f20afe49a3fd29f91c704ee761491459"
] | [
"machine_learning_course/lab_03_2.py"
] | [
"from sklearn import datasets\nimport pandas as pd\nimport numpy as np\n\nX, y = datasets.load_iris(return_X_y=True, as_frame=True)\nprint(X.describe()) # Opis\n\nprint(X.head()) # 5 pierwszych próbek\nprint(X.tail()) # 5 ostatnich próbek\n"
] | [
[
"sklearn.datasets.load_iris"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
IfyAnene7/pypkgs_isa | [
"d1d346f4da54a49735915f90fc19dbdb6695850f"
] | [
"tests/test_pypkgs_isa.py"
] | [
"from pypkgs_isa import __version__\nfrom pypkgs_isa import pypkgs_isa\nimport pandas as pd\n\ndef test_version():\n assert __version__ == '0.1.0'\n \n\ndef test_catbind():\n a = pd.Categorical([\"character\", \"hits\", \"your\", \"eyeballs\"])\n b = pd.Categorical([\"but\", \"integer\", \"where it\", \"counts\"])\n assert ((pypkgs_isa.catbind(a, b)).codes == [1, 4, 7, 3, 0, 5, 6, 2]).all()\n assert ((pypkgs_isa.catbind(a, b)).categories == [\"but\", \"character\",\n \"counts\", \"eyeballs\", \"hits\", \"integer\", \"where it\", \"your\"]).all()"
] | [
[
"pandas.Categorical"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
damien2012eng/deploy-ml-model-windows10 | [
"9f03df226046023b92bf27089cb04baf79a04dfe"
] | [
"app.py"
] | [
"from flask import Flask ,render_template,url_for,request\nimport numpy as np \nimport pickle\n\napp = Flask(__name__)\nmodel = pickle.load(open('model.pkl','rb'))\n\n# Home Route\[email protected]('/')\ndef home():\n\treturn render_template('home.html')\n\n# prediction\[email protected]('/predict',methods=['POST'])\ndef predict():\n\tint_feature = [x for x in request.form.values()]\n\tprint(int_feature)\n\tint_feature = [float(i) for i in int_feature]\n\tfinal_features = [np.array(int_feature)]\n\tprediction = model.predict(final_features)\n\n\toutput = prediction\n\tprint(output)\n\n\treturn render_template('home.html',prediction_text= output)\n\n\nif __name__ == \"__main__\":\n\tapp.run(host='0.0.0.0', port=80)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
karthickrock/Namma-food | [
"1e144b1317f673a06ef0e3533f4c36e2bdc7cd13"
] | [
"label_image.py"
] | [
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\r\n#\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n#\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n#\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n# ==============================================================================\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import division\r\nfrom __future__ import print_function\r\n\r\n#import argparse\r\nimport sys\r\nimport time\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\n'''import cv2\r\ncal=0\r\nprint(\"your calories diet plan:\")\r\namount=int(input())\r\nwhile True:\r\n camera = cv2.VideoCapture(0)\r\n while True:\r\n return_value,image = camera.read()\r\n \r\n cv2.imshow('image',image)\r\n if cv2.waitKey(1)& 0xFF == ord('s'):\r\n cv2.imwrite('test.jpg',image)\r\n break\r\n camera.release()\r\n cv2.destroyAllWindows()'''\r\n #graph module definition\r\ndef load_graph(model_file):\r\n graph = tf.Graph()\r\n graph_def = tf.GraphDef()\r\n\r\n with open(model_file, \"rb\") as f:\r\n graph_def.ParseFromString(f.read())\r\n with graph.as_default():\r\n tf.import_graph_def(graph_def)\r\n\r\n return graph\r\n#module that read tensor from a image\r\ndef read_tensor_from_image_file(file_name, input_height=299, input_width=299,\r\n input_mean=0, input_std=255):\r\n input_name = \"file_reader\"\r\n output_name = \"normalized\"\r\n file_reader = tf.read_file(file_name, input_name)\r\n if file_name.endswith(\".png\"):\r\n image_reader = tf.image.decode_png(file_reader, channels = 3,\r\n 
name='png_reader')\r\n elif file_name.endswith(\".gif\"):\r\n image_reader = tf.squeeze(tf.image.decode_gif(file_reader,\r\n name='gif_reader'))\r\n elif file_name.endswith(\".bmp\"):\r\n image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')\r\n else:\r\n image_reader = tf.image.decode_jpeg(file_reader, channels = 3,\r\n name='jpeg_reader')\r\n float_caster = tf.cast(image_reader, tf.float32)\r\n dims_expander = tf.expand_dims(float_caster, 0);\r\n resized = tf.image.resize_bilinear(dims_expander, [input_height, input_width])\r\n normalized = tf.divide(tf.subtract(resized, [input_mean]), [input_std])\r\n sess = tf.Session()\r\n result = sess.run(normalized)\r\n\r\n return result\r\n#generating labels for the detected object\r\ndef load_labels(label_file):\r\n label = []\r\n proto_as_ascii_lines = tf.gfile.GFile(label_file).readlines()\r\n for l in proto_as_ascii_lines:\r\n label.append(l.rstrip())\r\n return label\r\n\r\nif __name__ == \"__main__\":\r\n file_name = \"tf_files/flower_photos/daisy/3475870145_685a19116d.jpg\"\r\n model_file = \"retrained_graph.pb\"\r\n label_file = \"retrained_labels.txt\"\r\n input_height = 299\r\n input_width = 299\r\n input_mean = 0\r\n input_std = 255\r\n input_layer = \"Mul\"\r\n output_layer = \"final_result\"\r\n\r\n '''parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--image\", help=\"image to be processed\")\r\n parser.add_argument(\"--graph\", help=\"graph/model to be executed\")\r\n parser.add_argument(\"--labels\", help=\"name of file containing labels\")\r\n parser.add_argument(\"--input_height\", type=int, help=\"input height\")\r\n parser.add_argument(\"--input_width\", type=int, help=\"input width\")\r\n parser.add_argument(\"--input_mean\", type=int, help=\"input mean\")\r\n parser.add_argument(\"--input_std\", type=int, help=\"input std\")\r\n parser.add_argument(\"--input_layer\", help=\"name of input layer\")\r\n parser.add_argument(\"--output_layer\", help=\"name of output layer\")\r\n 
args = parser.parse_args()\r\n\r\n if args.graph:\r\n model_file = args.graph\r\n if args.image:\r\n file_name = args.image\r\n if args.labels:\r\n label_file = args.labels\r\n if args.input_height:\r\n input_height = args.input_height\r\n if args.input_width:\r\n input_width = args.input_width\r\n if args.input_mean:\r\n input_mean = args.input_mean\r\n if args.input_std:\r\n input_std = args.input_std\r\n if args.input_layer:\r\n input_layer = args.input_layer\r\n if args.output_layer:\r\n output_layer = args.output_layer'''\r\n#-------------------------giving image-----------------------------------\r\n \r\n file_name=\"th (11).jpg\"\r\n \r\n graph = load_graph(model_file)\r\n t = read_tensor_from_image_file(file_name,\r\n input_height=input_height,\r\n input_width=input_width,\r\n input_mean=input_mean,\r\n input_std=input_std)\r\n\r\n input_name = \"import/\" + input_layer\r\n output_name = \"import/\" + output_layer\r\n input_operation = graph.get_operation_by_name(input_name);\r\n output_operation = graph.get_operation_by_name(output_name);\r\n\r\n with tf.Session(graph=graph) as sess:\r\n start = time.time()\r\n results = sess.run(output_operation.outputs[0],\r\n {input_operation.outputs[0]: t})\r\n end=time.time()\r\n results = np.squeeze(results)\r\n\r\n top_k = results.argsort()[-1:][::-1]\r\n labels = load_labels(label_file)\r\n\r\n print('\\nEvaluation time (1-image): {:.3f}s\\n'.format(end-start))\r\n\r\n for i in top_k:\r\n print(\"the given image is:\",labels[i],\"with{:.2f}%\".format(results[i]*100),\"accuracy\")\r\n a=labels[i]\r\n'''#calories detection \r\n \r\n def calories(arg):\r\n switcher={\r\n \"dosa\":120,\r\n \"idle\":39,\r\n \"rice\":204,\r\n \"rotti\":110,\r\n \"vada\":103,\r\n \"chicken\":239,\r\n \"lemon rice\":221,\r\n \"tomato rice\":126,\r\n \"banana\":105,\r\n \"fish\":206,\r\n \"sambar\":130\r\n }\r\n return switcher[arg]\r\n #diet planning\r\n b=calories(a)\r\n if cal<=amount:\r\n cal=b+cal\r\n if cal>=amount:\r\n cal=cal-b\r\n 
#else:\r\n #cal=cal\r\n print(\"total calories addded is\",cal)\r\n \r\n\r\np'''\r\n"
] | [
[
"tensorflow.Graph",
"tensorflow.image.resize_bilinear",
"tensorflow.import_graph_def",
"tensorflow.read_file",
"tensorflow.gfile.GFile",
"numpy.squeeze",
"tensorflow.cast",
"tensorflow.image.decode_png",
"tensorflow.expand_dims",
"tensorflow.image.decode_bmp",
"tensorflow.subtract",
"tensorflow.image.decode_gif",
"tensorflow.Session",
"tensorflow.GraphDef",
"tensorflow.image.decode_jpeg"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
CAVED123/reinvent-scaffold-decorator | [
"37d0a8a571cc633a54b8a1d90884763bf503c347"
] | [
"train_model.py"
] | [
"#!/usr/bin/env python\n# coding=utf-8\n\n\"\"\"\nScript to train a model\n\"\"\"\n\nimport argparse\nimport os.path\nimport glob\nimport itertools as it\n\nimport torch\nimport torch.utils.tensorboard as tbx\n\nimport collect_stats_from_model as csfm\n\nimport models.model as mm\nimport models.actions as ma\n\nimport utils.chem as uc\nimport utils.log as ul\n\n\nclass TrainModelPostEpochHook(ma.TrainModelPostEpochHook):\n\n WRITER_CACHE_EPOCHS = 25\n\n def __init__(self, output_prefix_path, epochs, validation_sets, lr_scheduler, collect_stats_params,\n lr_params, collect_stats_frequency, save_frequency, logger=None):\n ma.TrainModelPostEpochHook.__init__(self, logger)\n\n self.validation_sets = validation_sets\n self.lr_scheduler = lr_scheduler\n\n self.output_prefix_path = output_prefix_path\n self.save_frequency = save_frequency\n self.epochs = epochs\n self.log_path = collect_stats_params[\"log_path\"]\n\n self.collect_stats_params = collect_stats_params\n self.collect_stats_frequency = collect_stats_frequency\n\n self.lr_params = lr_params\n\n self._writer = None\n if self.collect_stats_frequency > 0:\n self._reset_writer()\n\n def __del__(self):\n self._close_writer()\n\n def run(self, model, training_set, epoch):\n if self.collect_stats_frequency > 0 and epoch % self.collect_stats_frequency == 0:\n validation_set = next(self.validation_sets)\n other_values = {\"lr\": self.get_lr()}\n\n ma.CollectStatsFromModel(\n model=model, epoch=epoch, training_set=training_set,\n validation_set=validation_set, writer=self._writer, other_values=other_values, logger=self.logger,\n sample_size=self.collect_stats_params[\"sample_size\"]\n ).run()\n\n self.lr_scheduler.step(epoch=epoch)\n\n lr_reached_min = (self.get_lr() < self.lr_params[\"min\"])\n if lr_reached_min or self.epochs == epoch \\\n or (self.save_frequency > 0 and (epoch % self.save_frequency == 0)):\n model.save(self._model_path(epoch))\n\n if self._writer and (epoch % self.WRITER_CACHE_EPOCHS == 0):\n 
self._reset_writer()\n\n return not lr_reached_min\n\n def get_lr(self):\n return self.lr_scheduler.optimizer.param_groups[0][\"lr\"]\n\n def _model_path(self, epoch):\n return \"{}.{}\".format(self.output_prefix_path, epoch)\n\n def _reset_writer(self):\n self._close_writer()\n self._writer = tbx.SummaryWriter(log_dir=self.log_path)\n\n def _close_writer(self):\n if self._writer:\n self._writer.close()\n\n\ndef main():\n \"\"\"Main function.\"\"\"\n params = parse_args()\n lr_params = params[\"learning_rate\"]\n cs_params = params[\"collect_stats\"]\n params = params[\"other\"]\n\n # ut.set_default_device(\"cuda\")\n\n model = mm.DecoratorModel.load_from_file(params[\"input_model_path\"])\n optimizer = torch.optim.Adam(model.network.parameters(), lr=lr_params[\"start\"])\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=lr_params[\"step\"], gamma=lr_params[\"gamma\"])\n\n training_sets = load_sets(params[\"training_set_path\"])\n validation_sets = []\n if params[\"collect_stats_frequency\"] > 0:\n validation_sets = load_sets(cs_params[\"validation_set_path\"])\n\n post_epoch_hook = TrainModelPostEpochHook(\n params[\"output_model_prefix_path\"], params[\"epochs\"], validation_sets, lr_scheduler,\n cs_params, lr_params, collect_stats_frequency=params[\"collect_stats_frequency\"],\n save_frequency=params[\"save_every_n_epochs\"], logger=LOG\n )\n\n epochs_it = ma.TrainModel(model, optimizer, training_sets, params[\"batch_size\"], params[\"clip_gradients\"],\n params[\"epochs\"], post_epoch_hook, logger=LOG).run()\n\n for num, (total, epoch_it) in enumerate(epochs_it):\n for _ in ul.progress_bar(epoch_it, total=total, desc=\"#{}\".format(num)):\n pass # we could do sth in here, but not needed :)\n\n\ndef load_sets(set_path):\n file_paths = [set_path]\n if os.path.isdir(set_path):\n file_paths = sorted(glob.glob(\"{}/*.smi\".format(set_path)))\n\n for path in it.cycle(file_paths): # stores the path instead of the set\n yield 
list(uc.read_csv_file(path, num_fields=2))\n\n\nSUBCATEGORIES = [\"collect_stats\", \"learning_rate\"]\n\n\ndef parse_args():\n \"\"\"Parses input arguments.\"\"\"\n parser = argparse.ArgumentParser(\n description=\"Train a model on a SMILES file.\")\n\n _add_base_args(parser)\n _add_lr_args(parser)\n csfm.add_stats_args(parser, with_prefix=True, with_required=False)\n\n args = {k: {} for k in [\"other\", *SUBCATEGORIES]}\n for arg, val in vars(parser.parse_args()).items():\n done = False\n for prefix in SUBCATEGORIES:\n if arg.startswith(prefix):\n arg_name = arg[len(prefix) + 1:]\n args[prefix][arg_name] = val\n done = True\n if not done:\n args[\"other\"][arg] = val\n\n # special case\n args[\"other\"][\"collect_stats_frequency\"] = args[\"collect_stats\"][\"frequency\"]\n del args[\"collect_stats\"][\"frequency\"]\n\n return args\n\n\ndef _add_lr_args(parser):\n parser.add_argument(\"--learning-rate-start\", \"--lrs\",\n help=\"Starting learning rate for training. [DEFAULT: 1E-4]\", type=float, default=1E-4)\n parser.add_argument(\"--learning-rate-min\", \"--lrmin\",\n help=\"Minimum learning rate, when reached the training stops. [DEFAULT: 1E-6]\",\n type=float, default=1E-6)\n parser.add_argument(\"--learning-rate-gamma\", \"--lrg\",\n help=\"Ratio which the learning change is changed. [DEFAULT: 0.95]\", type=float, default=0.95)\n parser.add_argument(\"--learning-rate-step\", \"--lrt\",\n help=\"Number of epochs until the learning rate changes. 
[DEFAULT: 1]\",\n type=int, default=1)\n\n\ndef _add_base_args(parser):\n parser.add_argument(\"--input-model-path\", \"-i\", help=\"Input model file\", type=str, required=True)\n parser.add_argument(\"--output-model-prefix-path\", \"-o\",\n help=\"Prefix to the output model (may have the epoch appended).\", type=str, required=True)\n parser.add_argument(\"--training-set-path\", \"-s\", help=\"Path to a file with (scaffold, decoration) tuples \\\n or a directory with many of these files to be used as training set.\", type=str, required=True)\n parser.add_argument(\"--save-every-n-epochs\", \"--sen\",\n help=\"Save the model after n epochs. [DEFAULT: 1]\", type=int, default=1)\n parser.add_argument(\"--epochs\", \"-e\", help=\"Number of epochs to train. [DEFAULT: 100]\", type=int, default=100)\n parser.add_argument(\"--batch-size\", \"-b\",\n help=\"Number of molecules processed per batch. [DEFAULT: 128]\", type=int, default=128)\n parser.add_argument(\"--clip-gradients\",\n help=\"Clip gradients to a given norm. [DEFAULT: 1.0]\", type=float, default=1.0)\n parser.add_argument(\"--collect-stats-frequency\", \"--csf\",\n help=\"Collect statistics every n epochs. [DEFAULT: 0]\", type=int, default=0)\n\n\nif __name__ == \"__main__\":\n LOG = ul.get_logger(name=\"train_model\")\n main()\n"
] | [
[
"torch.utils.tensorboard.SummaryWriter",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JulianKlug/scop | [
"b0d6a805a11ee8b4d0f53a4d6a5ec402988298e4"
] | [
"exploratory_experiments/outcome/with_lesion_input/cross_validate_with_ground_truth_lesion.py"
] | [
"import os\nimport shutil\nimport tempfile\nfrom datetime import datetime\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import StratifiedKFold\n\nfrom keras_scope.test import test\nfrom utils.utils import ensure_dir, save_dataset\nfrom keras_scope.train import train\n\ndef cross_validate_with_ground_truth_lesion():\n \"\"\"\n Rationale: check if adding ground truth lesion adds information for final outcome prediction\n \"\"\"\n n_repeats = 1\n n_folds = 5\n\n label_file_path = '/mnt/data/hendrik/jk/scope_data/joined_anon_outcomes_2015_2016_2017_2018_df.xlsx'\n imaging_dataset_path = '/mnt/data/hendrik/jk/scope_data/data_set_with_combined_mRS_0-2_90_days.npz'\n output_dir = '/home/hendrik/jk/output/keras_scope/with_gt_lesion_cross_validation'\n\n # imaging_dataset_path = \"/Users/jk1/stroke_datasets/dataset_files/perfusion_data_sets/data_set_with_combined_mRS_0-2_90_days.npz\"\n # label_file_path = \"/Users/jk1/temp/scope_test/joined_anon_outcomes_2015_2016_2017_2018_df.xlsx\"\n # output_dir = '/Users/jk1/temp/cv_scope_test'\n\n channels = [0, 1, 2, 3, 4]\n outcome = \"combined_mRS_0-2_90_days\"\n desired_shape = (46, 46, 46)\n epochs = 400\n initial_learning_rate = 0.0001\n\n ensure_dir(output_dir)\n output_dir = os.path.join(output_dir, 'cv_' + datetime.now().strftime(\"%Y%m%d_%H%M%S\"))\n ensure_dir(output_dir)\n\n # load data\n params = np.load(imaging_dataset_path, allow_pickle=True)['params']\n ids = np.load(imaging_dataset_path, allow_pickle=True)['ids']\n outcomes_df = pd.read_excel(label_file_path)\n labels = np.array([outcomes_df.loc[outcomes_df['anonymised_id'] == subj_id, outcome].iloc[0] for\n subj_id in ids])\n raw_images = np.load(imaging_dataset_path, allow_pickle=True)['ct_inputs']\n raw_lesions = np.load(imaging_dataset_path, allow_pickle=True)['ct_lesion_GT']\n raw_images = np.concatenate((raw_images, np.expand_dims(raw_lesions, axis=-1)), axis=-1)\n\n raw_masks = np.load(imaging_dataset_path, 
allow_pickle=True)['brain_masks']\n all_indices = list(range(len(ids)))\n\n result_df = pd.DataFrame()\n\n # Start iteration of repeated k-fold cross-validation\n iteration = 0\n for j in np.random.randint(0, high=10000, size=n_repeats):\n iteration_dir = os.path.join(output_dir, 'iteration_' + str(iteration))\n ensure_dir(iteration_dir)\n\n print('Crossvalidation: Creating iteration ' + str(iteration) + ' of a total of ' + str(n_repeats))\n\n fold = 0\n kf = StratifiedKFold(n_splits = n_folds, shuffle = True, random_state = j)\n for train_indices, test_indices in kf.split(all_indices, labels):\n fold_dir = os.path.join(iteration_dir, 'fold_' + str(fold))\n ensure_dir(fold_dir)\n\n # save temporary dataset files for this fold\n temp_data_dir = tempfile.mkdtemp()\n temp_train_data_path = os.path.join(temp_data_dir, 'train_dataset.npz')\n temp_test_data_path = os.path.join(temp_data_dir, 'test_dataset.npz')\n save_dataset(raw_images[train_indices], raw_masks[train_indices], ids[train_indices], params, temp_train_data_path)\n save_dataset(raw_images[test_indices], raw_masks[test_indices], ids[test_indices], params, temp_test_data_path)\n\n # train\n _, model_path = train(label_file_path, temp_train_data_path, fold_dir, outcome, channels, desired_shape,\n initial_learning_rate, epochs)\n\n # test\n fold_result_dict = test(model_path, label_file_path, temp_test_data_path, outcome, channels, desired_shape)\n\n # store results\n fold_result_dict.update({'iteration': iteration, 'fold': fold, 'kfold_split_seed': j})\n fold_result_df = pd.DataFrame(fold_result_dict, index=[0])\n result_df = result_df.append(fold_result_df)\n\n # todo add all parameters to train and test\n\n shutil.rmtree(temp_data_dir)\n fold += 1\n iteration += 1\n\n print('Median test AUC', result_df['auc'].median())\n print('Median test accuracy', result_df['acc'].median())\n result_df.to_csv(os.path.join(output_dir, 'cv_test_results.csv'))\n\n\nif __name__ == '__main__':\n 
cross_validate_with_ground_truth_lesion()\n"
] | [
[
"pandas.read_excel",
"numpy.expand_dims",
"pandas.DataFrame",
"sklearn.model_selection.StratifiedKFold",
"numpy.load",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
nerdneilsfield/2D-3D-pose-tracking | [
"33678c775e53360116099d43cf0712072b53a25f"
] | [
"afm/scripts/modeling/output/output.py"
] | [
"import modeling.registry as registry\nfrom modeling.registry import OUTPUT_METHODS\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os.path as osp\nimport os\nimport scipy.io as sio\n\n@OUTPUT_METHODS.register(\"display\")\ndef display(data_dict, cfg):\n image = data_dict['image'] \n # image_resized = data_dict['image_resized']\n\n # image = np.transpose(image, [1,2,0])\n # image[...,0] = (image[...,0]*0.229 + 0.485)\n # image[...,1] = (image[...,1]*0.224 + 0.456)\n # image[...,2] = (image[...,2]*0.225 + 0.406)\n # image = np.array(image*255,dtype=np.uint8)\n # import pdb\n # pdb.set_trace()\n height, width = image.shape[:2]\n h0, w0 = data_dict['afmap_pred'].shape[1:]\n #scale_factor = np.array([width/w0,height/h0,width/w0,height/h0],dtype=np.float32)\n scale_factor = np.array([float(width)/float(w0),\n float(height)/float(h0),\n float(width)/float(w0),\n float(height)/float(h0)],dtype=np.float32)\n\n lines = data_dict['lines_pred_resized']\n lines[:,:4] *= scale_factor\n\n lengths = np.sqrt((lines[:,2]-lines[:,0])*(lines[:,2]-lines[:,0]) + (lines[:,3]-lines[:,1])*(lines[:,3]-lines[:,1]))\n ratio = lines[:,4]/lengths \n\n threshold = cfg.TEST.DISPLAY.THRESHOLD \n idx = np.where(ratio<=threshold)[0] \n lines = lines[idx]\n \n plt.imshow(image[...,::-1])\n plt.plot([lines[:,0],lines[:,2]],[lines[:,1],lines[:,3]],'r-')\n plt.xlim([0,width])\n plt.ylim([height,0])\n plt.axis('off')\n plt.show()\n\n\n@OUTPUT_METHODS.register(\"save\")\ndef save(data_dict, cfg):\n fname = data_dict['fname'].rstrip('.png')\n image = data_dict['image'] \n image_resized = data_dict['image_resized']\n\n # image = np.transpose(image, [1,2,0])\n # image[...,0] = (image[...,0]*0.229 + 0.485)\n # image[...,1] = (image[...,1]*0.224 + 0.456)\n # image[...,2] = (image[...,2]*0.225 + 0.406)\n # image = np.array(image*255,dtype=np.uint8)\n # import pdb\n # pdb.set_trace()\n height, width = image.shape[:2]\n h0, w0 = image_resized.shape[1:]\n\n scale_factor = 
np.array([width/w0,height/h0,width/w0,height/h0],dtype=np.float32) \n\n\n lines = data_dict['lines_pred_resized']\n lines[:,:4] *=scale_factor\n\n\n output_dir = data_dict['output_dir']\n if osp.isdir(output_dir) is not True:\n os.makedirs(output_dir)\n\n output_path = osp.join(output_dir, fname+'.mat')\n \n sio.savemat(output_path, mdict={\n 'height': height,\n 'width': width,\n 'gt': data_dict['lines_gt'],\n 'pred': lines,\n })\n\n \n\n \n # lines_pred = data_dict['lines_pred']\n\ndef build_output_method(cfg):\n assert cfg.TEST.OUTPUT_MODE in registry.OUTPUT_METHODS\n\n return registry.OUTPUT_METHODS[cfg.TEST.OUTPUT_MODE]\n\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.sqrt",
"matplotlib.pyplot.ylim",
"scipy.io.savemat",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.axis",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
Mrhsk/cpsc2021 | [
"dcb7fb23edf7df79549279d053e4a8cadab3b268"
] | [
"utils/aux_metrics.py"
] | [
"\"\"\"\nauxiliary metrics for the task of qrs detection\n\nReferences\n----------\n[1] http://2019.icbeb.org/Challenge.html\n\"\"\"\nimport math\nimport multiprocessing as mp\nfrom typing import Union, Optional, Sequence, Dict\nfrom numbers import Real\n\nimport numpy as np\nimport torch\n\nfrom torch_ecg.torch_ecg.models.loss import MaskedBCEWithLogitsLoss\n\nfrom .misc import mask_to_intervals\n\n\n__all__ = [\n \"compute_rpeak_metric\",\n \"compute_rr_metric\",\n \"compute_main_task_metric\",\n]\n\n\n_MBCE = MaskedBCEWithLogitsLoss()\n\n\ndef compute_rpeak_metric(rpeaks_truths:Sequence[Union[np.ndarray,Sequence[int]]],\n rpeaks_preds:Sequence[Union[np.ndarray,Sequence[int]]],\n fs:Real,\n thr:float=0.075,\n verbose:int=0) -> Dict[str, float]:\n \"\"\" finished, checked,\n\n Parameters\n ----------\n rpeaks_truths: sequence,\n sequence of ground truths of rpeaks locations (indices) from multiple records\n rpeaks_preds: sequence,\n predictions of ground truths of rpeaks locations (indices) for multiple records\n fs: real number,\n sampling frequency of ECG signal\n thr: float, default 0.075,\n threshold for a prediction to be truth positive,\n with units in seconds,\n verbose: int, default 0,\n print verbosity\n\n Returns\n -------\n rec_acc: float,\n accuracy of predictions\n \"\"\"\n assert len(rpeaks_truths) == len(rpeaks_preds), \\\n f\"number of records does not match, truth indicates {len(rpeaks_truths)}, while pred indicates {len(rpeaks_preds)}\"\n n_records = len(rpeaks_truths)\n record_flags = np.ones((len(rpeaks_truths),), dtype=float)\n thr_ = thr * fs\n if verbose >= 1:\n print(f\"number of records = {n_records}\")\n print(f\"threshold in number of sample points = {thr_}\")\n for idx, (truth_arr, pred_arr) in enumerate(zip(rpeaks_truths, rpeaks_preds)):\n false_negative = 0\n false_positive = 0\n true_positive = 0\n extended_truth_arr = np.concatenate((truth_arr.astype(int), [int(9.5*fs)]))\n for j, t_ind in enumerate(extended_truth_arr[:-1]):\n 
next_t_ind = extended_truth_arr[j+1]\n loc = np.where(np.abs(pred_arr - t_ind) <= thr_)[0]\n if j == 0:\n err = np.where((pred_arr >= 0.5*fs + thr_) & (pred_arr <= t_ind - thr_))[0]\n else:\n err = np.array([], dtype=int)\n err = np.append(\n err,\n np.where((pred_arr >= t_ind+thr_) & (pred_arr <= next_t_ind-thr_))[0]\n )\n\n false_positive += len(err)\n if len(loc) >= 1:\n true_positive += 1\n false_positive += len(loc) - 1\n elif len(loc) == 0:\n false_negative += 1\n\n if false_negative + false_positive > 1:\n record_flags[idx] = 0\n elif false_negative == 1 and false_positive == 0:\n record_flags[idx] = 0.3\n elif false_negative == 0 and false_positive == 1:\n record_flags[idx] = 0.7\n\n if verbose >= 2:\n print(f\"for the {idx}-th record,\\ntrue positive = {true_positive}\\nfalse positive = {false_positive}\\nfalse negative = {false_negative}\")\n\n rec_acc = round(np.sum(record_flags) / n_records, 4)\n\n if verbose >= 1:\n print(f'QRS_acc: {rec_acc}')\n print('Scoring complete.')\n\n metrics = {\"qrs_score\": rec_acc}\n\n return metrics\n\n\ndef compute_rr_metric(rr_truths:Sequence[Union[np.ndarray,Sequence[int]]],\n rr_preds:Sequence[Union[np.ndarray,Sequence[int]]],\n weight_masks:Optional[Sequence[Union[np.ndarray,Sequence[int]]]]=None,\n verbose:int=0) -> Dict[str, float]:\n \"\"\" finished, checked,\n\n this metric for evaluating the RR_LSTM model,\n which imitates the metric provided by the organizers of CPSC2021\n\n Parameters\n ----------\n rr_truths: array_like,\n sequences of AF labels on rr intervals, of shape (n_samples, seq_len)\n rr_truths: array_like,\n sequences of AF predictions on rr intervals, of shape (n_samples, seq_len)\n\n Returns\n -------\n rr_score: float,\n the score computed from predicts from rr sequences,\n similar to CPSC2021 challenge metric\n neg_masked_bce: float,\n negative masked BCE loss\n \"\"\"\n with mp.Pool(processes=max(1,mp.cpu_count())) as pool:\n af_episode_truths = pool.starmap(\n func=mask_to_intervals,\n 
iterable=[(row,1,True) for row in rr_truths]\n )\n with mp.Pool(processes=max(1,mp.cpu_count())) as pool:\n af_episode_preds = pool.starmap(\n func=mask_to_intervals,\n iterable=[(row,1,True) for row in rr_preds]\n )\n scoring_mask = np.zeros_like(np.array(rr_truths))\n n_samples, seq_len = scoring_mask.shape\n for idx, sample in enumerate(af_episode_truths):\n for itv in sample:\n scoring_mask[idx][max(0,itv[0]-2):min(seq_len,itv[0]+3)] = 0.5\n scoring_mask[idx][max(0,itv[1]-2):min(seq_len,itv[1]+3)] = 0.5\n scoring_mask[idx][max(0,itv[0]-1):min(seq_len,itv[0]+2)] = 1\n scoring_mask[idx][max(0,itv[1]-1):min(seq_len,itv[1]+2)] = 1\n rr_score = sum([\n scoring_mask[idx][itv].sum() / max(1, len(af_episode_truths[idx])) \\\n for idx in range(n_samples) for itv in af_episode_preds[idx]\n ])\n rr_score += sum([0==len(t)==len(p) for t, p in zip(af_episode_truths, af_episode_preds)])\n neg_masked_bce = -_MBCE(\n torch.as_tensor(rr_preds, dtype=torch.float32, device=torch.device(\"cpu\")),\n torch.as_tensor(rr_truths, dtype=torch.float32, device=torch.device(\"cpu\")),\n torch.as_tensor(weight_masks, dtype=torch.float32, device=torch.device(\"cpu\")),\n ).item()\n metrics = {\n \"rr_score\": rr_score,\n \"neg_masked_bce\": neg_masked_bce,\n }\n return metrics\n\n\ndef compute_main_task_metric(mask_truths:Sequence[Union[np.ndarray,Sequence[int]]],\n mask_preds:Sequence[Union[np.ndarray,Sequence[int]]],\n fs:Real,\n reduction:int,\n weight_masks:Optional[Sequence[Union[np.ndarray,Sequence[int]]]]=None,\n rpeaks:Optional[Sequence[Sequence[int]]]=None,\n verbose:int=0) -> Dict[str, float]:\n \"\"\" finished, checked,\n\n this metric for evaluating the main task model (seq_lab or unet),\n which imitates the metric provided by the organizers of CPSC2021\n\n Parameters\n ----------\n mask_truths: array_like,\n sequences of AF labels on rr intervals, of shape (n_samples, seq_len)\n mask_preds: array_like,\n sequences of AF predictions on rr intervals, of shape (n_samples, 
seq_len)\n fs: Real,\n sampling frequency of the model input ECGs,\n used when (indices of) `rpeaks` not privided\n reduction: int,\n reduction ratio of the main task model\n rpeaks: array_like, optional,\n indices of rpeaks in the model input ECGs,\n if set, more precise scores can be computed\n\n Returns\n -------\n main_score: float,\n the score computed from predicts from the main task model,\n similar to CPSC2021 challenge metric\n neg_masked_bce: float,\n negative masked BCE loss\n \"\"\"\n default_rr = int(fs * 0.8 / reduction)\n if rpeaks is not None:\n assert len(rpeaks) == len(mask_truths)\n with mp.Pool(processes=max(1,mp.cpu_count())) as pool:\n af_episode_truths = pool.starmap(\n func=mask_to_intervals,\n iterable=[(row,1,True) for row in mask_truths]\n )\n with mp.Pool(processes=max(1,mp.cpu_count())) as pool:\n af_episode_preds = pool.starmap(\n func=mask_to_intervals,\n iterable=[(row,1,True) for row in mask_preds]\n )\n af_episode_truths = [[[itv[0]*reduction, itv[1]*reduction] for itv in sample] for sample in af_episode_truths]\n af_episode_preds = [[[itv[0]*reduction, itv[1]*reduction] for itv in sample] for sample in af_episode_preds]\n n_samples, seq_len = np.array(mask_truths).shape\n scoring_mask = np.zeros((n_samples, seq_len*reduction))\n for idx, sample in enumerate(af_episode_truths):\n for itv in sample:\n if rpeaks is not None:\n itv_rpeaks = [i for i,r in enumerate(rpeaks[idx]) if itv[0] <= r < itv[1]]\n start = rpeaks[idx][max(0,itv_rpeaks[0]-2)]\n end = rpeaks[idx][min(len(rpeaks[idx])-1,itv_rpeaks[0]+2)] + 1\n scoring_mask[idx][start:end] = 0.5\n start = rpeaks[idx][max(0,itv_rpeaks[-1]-2)]\n end = rpeaks[idx][min(len(rpeaks[idx])-1,itv_rpeaks[-1]+2)] + 1\n scoring_mask[idx][start:end] = 0.5\n start = rpeaks[idx][max(0,itv_rpeaks[0]-1)]\n end = rpeaks[idx][min(len(rpeaks[idx])-1,itv_rpeaks[0]+1)] + 1\n scoring_mask[idx][start:end] = 1\n start = rpeaks[idx][max(0,itv_rpeaks[-1]-1)]\n end = 
rpeaks[idx][min(len(rpeaks[idx])-1,itv_rpeaks[-1]+1)] + 1\n scoring_mask[idx][start:end] = 1\n else:\n scoring_mask[idx][max(0,itv[0]-2*default_rr):min(seq_len,itv[0]+2*default_rr+1)] = 0.5\n scoring_mask[idx][max(0,itv[1]-2*default_rr):min(seq_len,itv[1]+2*default_rr+1)] = 0.5\n scoring_mask[idx][max(0,itv[0]-1*default_rr):min(seq_len,itv[0]+1*default_rr+1)] = 1\n scoring_mask[idx][max(0,itv[1]-1*default_rr):min(seq_len,itv[1]+1*default_rr+1)] = 1\n main_score = sum([\n scoring_mask[idx][itv].sum() / max(1, len(af_episode_truths[idx])) \\\n for idx in range(n_samples) for itv in af_episode_preds[idx]\n ])\n main_score += sum([0==len(t)==len(p) for t, p in zip(af_episode_truths, af_episode_preds)])\n neg_masked_bce = -_MBCE(\n torch.as_tensor(mask_preds, dtype=torch.float32, device=torch.device(\"cpu\")),\n torch.as_tensor(mask_truths, dtype=torch.float32, device=torch.device(\"cpu\")),\n torch.as_tensor(weight_masks, dtype=torch.float32, device=torch.device(\"cpu\")),\n ).item()\n metrics = {\n \"main_score\": main_score,\n \"neg_masked_bce\": neg_masked_bce,\n }\n return metrics\n\n\n\n# class WeightedBoundaryLoss(nn.Module):\n# \"\"\"\n# \"\"\"\n# __name__ = \"WeightedBoundaryLoss\"\n\n# def __init__(self, weight_map:Dict[int,Real], sigma:Real, w:Real) -> NoReturn:\n# \"\"\"\n# \"\"\"\n# self.weight_map = weight_map\n# self.sigma = sigma\n# self.w = w\n\n# def forward(self, input:Tensor, target:Tensor) -> Tensor:\n# \"\"\"\n# \"\"\"\n# _device = input.device\n# _dtype = input.dtype\n# weight_mask = torch.zeros_like(input, dtype=_dtype, device=_device)\n# if target.shape[-1] == 1:\n# w = torch.full_like(input, self.weight_map[0], dtype=_dtype, device=_device)\n# weight_mask.add_((target < 0.5)*w)\n# w = torch.full_like(input, self.weight_map[1], dtype=_dtype, device=_device)\n# weight_mask.add_((target > 0.5)*w)\n# else:\n# for i in range(input.shape[-1]):\n# w = torch.full(input.shape[:-1], self.weight_map[i], dtype=_dtype, device=_device)\n# 
weight_mask[...,i].add_((target[...,i] > 0.5)*w)\n "
] | [
[
"numpy.abs",
"torch.device",
"numpy.array",
"numpy.where",
"numpy.sum",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sdyinzhen/AutoBEL | [
"213011ef8c6d2bcc16ac92a263e615f4a7033ff4"
] | [
"source_code/plt_MC_models_smpls.py"
] | [
"#Author: David Zhen Yin\n#Contact: [email protected]\n#Date: Apri 03, 2019\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\ndef m_ensampl_plt(m_smpls_pos, m_smpls_pri, layernum, i_dim, j_dim, k_dim, dobs):\n ''' \n Plot the the ensemble mean and variance of prior and posterior\n Args:\n m_smpls_pos: the name of posterior model sample matrix, N_realizations x Grid_dims\n m_smpls_pri: the name of prior model sample matrix, N_realizations x Grid_dims\n layernum: which layer to plot\n dobs: the well data observation: 4 x well_number: row1 = x, row2=j, row3 = k, row4 = value\n \n Output:\n Pareto Plot for SA\n '''\n\n plt.figure(figsize=(12.5,6.2))\n \n plt.subplot(221)\n m_ens_mean = np.mean(m_smpls_pos[:,j_dim*i_dim*(layernum-1):j_dim*i_dim*layernum], axis=0)\n \n plt.imshow(m_ens_mean.reshape(j_dim,i_dim), \\\n extent=[0,50000,25000,0], cmap='jet', \\\n vmax=np.max(m_ens_mean), vmin=np.min(m_ens_mean))\n \n plt.colorbar(fraction = 0.02)\n plt.scatter(dobs[0, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n dobs[1, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n c= dobs[3, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)], \\\n cmap='jet', edgecolors=(0, 0, 0), linewidth =0.9, s=80, vmax=np.max(m_ens_mean), vmin=np.min(m_ens_mean))\n plt.tick_params(labelsize=13)\n plt.title('Ensemble Mean of Posterior', fontsize = 15, style='italic')\n\n plt.subplot(222)\n m_ens_mean_pri = np.mean(m_smpls_pri[:,j_dim*i_dim*(layernum-1):j_dim*i_dim*layernum], axis=0)\n \n plt.imshow(m_ens_mean_pri.reshape(j_dim,i_dim), \\\n extent=[0,50000,25000,0], cmap='jet', \\\n vmax=np.max(m_ens_mean), vmin=np.min(m_ens_mean))\n \n plt.colorbar(fraction = 0.02)\n \n plt.scatter(dobs[0, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n dobs[1, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n c= dobs[3, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)], \\\n cmap='jet', edgecolors=(0, 0, 0), 
linewidth =0.9, s=80, \\\n vmax=np.max(m_ens_mean), vmin=np.min(m_ens_mean))\n plt.tick_params(labelsize=13)\n plt.title('Ensemble Mean of Prior', fontsize = 15, style='italic')\n \n \n plt.subplot(223)\n m_ens_var = np.var(m_smpls_pos[:,j_dim*i_dim*(layernum-1):j_dim*i_dim*layernum], axis=0)\n plt.imshow(m_ens_var.reshape(j_dim,i_dim), extent=[0,50000,25000,0], cmap='bwr', \\\n vmin = np.min(m_ens_var), vmax=np.max(m_ens_var))\n plt.colorbar(fraction = 0.02)\n plt.scatter(dobs[0, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n dobs[1, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n color='white', edgecolors=(0, 0, 0), linewidth =1.2, s=80, alpha=0.7)\n plt.tick_params(labelsize=13)\n plt.title('Ensemble Variance of Posterior', fontsize = 15, style='italic')\n \n\n plt.subplot(224)\n m_ens_var_pri = np.var(m_smpls_pri[:,j_dim*i_dim*(layernum-1):j_dim*i_dim*layernum], axis=0)\n plt.imshow(m_ens_var_pri.reshape(j_dim,i_dim), extent=[0,50000,25000,0], cmap='bwr', \\\n vmin = np.min(m_ens_var), vmax=np.max(m_ens_var))\n plt.colorbar(fraction = 0.02)\n plt.scatter(dobs[0, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n dobs[1, len(dobs[0,:])*(layernum-1):len(dobs[0,:])*(layernum)]*250, \\\n color='white', edgecolors=(0, 0, 0), linewidth =1.2, s=80, alpha=0.7)\n plt.tick_params(labelsize=13)\n plt.title('Ensemble Variance of Prior', fontsize = 15, style='italic') \n \n plt.tight_layout()\n \n return \n\ndef mc_samples_plot(model_ndarray, m_name, m_type, pri_post, i_dim, j_dim, k_dim, layernum):\n '''\n Plot the 1st to 12th monte carlo model samples. \n Args:\n model_ndarray: (str) the ndarray of monte carlo model samples, N_realizations x Grid_dims\n i_dim, j_dim, k_dim: x, y, z dimensions of one model realization. 
\n layernum: which layer to show\n m_type: type of the model, 1 - continous; 2 - categorical\n \n Output:\n Pareto Plot for SA\n '''\n fig=plt.figure(figsize=(15,14))\n count = 1\n k_dim = int(len(model_ndarray[1])/(i_dim*j_dim))\n if m_type == 2: # 2 is for cate gorical models; 1 is for continous models. \n \n for realnum in range(12):\n if count == 12:\n plot=fig.add_subplot(3, 4, count)\n plt.text(0.1, 0.48, '...', fontsize=50)\n plt.text(0.0, 0.6, 'Total '+str(len(model_ndarray))+' samples', fontsize=16, style='italic')\n plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())\n count = count + 1\n else:\n grid_data = model_ndarray[realnum].reshape(k_dim, j_dim, i_dim) \n plot=fig.add_subplot(3, 4, count)\n count = count+1\n prop_mean = format(np.mean(grid_data),'.4f')\n plt.imshow(grid_data[layernum-1],cmap='viridis_r') # for poro\n plt.xticks(fontsize = 13)\n plt.yticks(fontsize = 13)\n plt.title(pri_post +' ' + m_name+ ' model #'+str(count-1), fontsize=14, style='italic')\n\n \n else:\n for realnum in range(12):\n if count == 12:\n plot=fig.add_subplot(3, 4, count)\n plt.text(0.1, 0.48, '...', fontsize=50)\n plt.text(0.0, 0.6, 'Total ' + str(len(model_ndarray))+' samples', fontsize=16, style='italic')\n plt.setp(plt.gca(), frame_on=False, xticks=(), yticks=())\n count = count + 1\n else:\n \n grid_data = model_ndarray[realnum].reshape(k_dim, j_dim, i_dim) \n plot=fig.add_subplot(3, 4, count)\n count = count+1\n\n prop_mean = format(np.mean(grid_data),'.4f')\n plot.set_xlabel('average \"' + m_name + '\" = ' + str(prop_mean), fontsize = 14)\n c_max = np.max(grid_data[layernum-1])*1.05\n c_min = np.min(grid_data[layernum-1])\n\n plt.imshow(grid_data[layernum-1],cmap='jet', \\\n vmin=c_min,vmax=c_max*1.05) \n plt.xticks(fontsize = 13)\n plt.yticks(fontsize = 13)\n plt.title(pri_post +' ' +m_name+ ' model #'+str(count-1), fontsize=14, style='italic')\n\n# plt.colorbar(fraction = 0.02)\n plt.colorbar(fraction = 0.02, ticks=np.around([c_min*1.1, c_max], 
decimals=1))\n plt.subplots_adjust(top=0.55, bottom=0.08, left=0.10, right=0.95, hspace=0.15,\n wspace=0.35)\n return"
] | [
[
"matplotlib.pyplot.gca",
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.title",
"numpy.min",
"numpy.around",
"matplotlib.pyplot.colorbar",
"numpy.max",
"matplotlib.pyplot.subplot",
"numpy.mean",
"matplotlib.pyplot.subplots_adjust",
"numpy.var",
"matplotlib.pyplot.text",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chcorophyll/general_image_process_python | [
"0ab3b3da246808c36822d31fa0fd226f8d4079ab"
] | [
"HaarCascade.py"
] | [
"\"\"\"\nReferences:\nhttps://github.com/Simon-Hohberg/Viola-Jones/tree/master/violajones\nhttps://medium.datadriveninvestor.com/understanding-and-implementing-the-viola-jones-image-classification-algorithm-85621f7fe20b\nhttps://github.com/aparande/FaceDetection\n\"\"\"\nimport math\nimport pickle\nfrom multiprocessing import Pool\nfrom functools import partial\nimport progressbar\nimport numpy as np\nfrom sklearn.feature_selection import SelectPercentile, f_classif\n\n# LOADING_BAR_LENGTH = 50\n#\n#\n# # construct integral image\n# def to_integral_image(image_array):\n# row_sum = np.zeros(image_array.shape)\n# integral_image_array = np.zeros((image_array.shape[0] + 1, image_array.shape[1] + 1))\n# for x in range(image_array.shape[1]):\n# for y in range(image_array.shape[0]):\n# row_sum[y, x] = row_sum[y - 1, x] + image_array[y, x]\n# integral_image_array[y + 1, x + 1] = integral_image_array[y + 1, x] + row_sum[y, x]\n# return integral_image_array\n#\n#\n# # integral compute\n# def sum_region(integral_image_array, top_left, bottom_right):\n# top_left = (top_left[1], top_left[0])\n# bottom_right = (bottom_right[1], bottom_right[0])\n# if top_left == bottom_right:\n# return integral_image_array[top_left]\n# top_right = (bottom_right[0], top_left[1])\n# bottom_left = (top_left[0], bottom_right[1])\n# return integral_image_array[bottom_right] - integral_image_array[top_right] - \\\n# integral_image_array[bottom_left] + integral_image_array[top_left]\n#\n#\n# # enum type\n# def enum(**enums):\n# return type(\"Enum\", (), enums)\n#\n#\n# FeatureType = enum(TWO_VERTICAL=(1, 2), TWO_HORIZONTAL=(2, 1),\n# THREE_HORIZONTAL=(3, 1), THREE_VERTICAL=(1, 3), FOUR=(2, 2))\n# FeatureTypes = [FeatureType.TWO_VERTICAL, FeatureType.TWO_HORIZONTAL, FeatureType.THREE_VERTICAL,\n# FeatureType.THREE_HORIZONTAL, FeatureType.FOUR]\n#\n#\n# # Haar Feature\n# class HaarLikeFeature(object):\n#\n# def __init__(self, feature_type, position, width, height, threshold, polarity):\n# self.type = 
feature_type\n# self.top_left = position\n# self.bottom_right = (position[0] + width, position[1] + height)\n# self.width = width\n# self.height = height\n# self.threshold = threshold\n# self.polarity = polarity\n# self.weight = 1\n#\n# def get_score(self, integral_image):\n# score = 0\n# if self.type == FeatureType.TWO_VEWRTICAL:\n# first = sum_region(integral_image,\n# self.top_left,\n# (self.top_left[0] + self.width, int(self.top_left[1] + self.height / 2)))\n# second = sum_region(integral_image,\n# (self.top_left[0], int(self.top_left[1] + self.height / 2)),\n# self.bottom_right)\n# score = first - second\n# elif self.type == FeatureType.TWO_HORIZONTAL:\n# first = sum_region(integral_image,\n# self.top_left,\n# (int(self.top_left[0] + self.width / 2), self.top_left[1] + self.height))\n# second = sum_region(integral_image,\n# (int(self.top_left[0] + self.width / 2), self.top_left[1]),\n# self.bottom_right)\n# score = first - second\n# elif self.type == FeatureType.THREE_HORIZONTAL:\n# first = sum_region(integral_image,\n# self.top_left,\n# (int(self.top_left[0] + self.width / 3), self.top_left[1] + self.height))\n# second = sum_region(integral_image,\n# (int(self.top_left[0] + self.width / 3), self.top_left[1]),\n# (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1] + self.height))\n# third = sum_region(integral_image,\n# (int(self.top_left[0] + 2 * self.width / 3), self.top_left[1]),\n# self.bottom_right)\n# score = first - second + third\n# elif self.type == FeatureType.THREE_VERTICAL:\n# first = sum_region(integral_image,\n# self.top_left,\n# (self.bottom_right[0], int(self.top_left[1] + self.height / 3)))\n# second = sum_region(integral_image,\n# (self.top_left[0], int(self.top_left[1] + self.height / 3)),\n# (self.bottom_right[0], int(self.top_left[1] + 2 * self.height / 3)))\n#\n# third = sum_region(integral_image,\n# (self.top_left[0], int(self.top_left[1] + 2 * self.height / 3)),\n# self.bottom_right)\n# score = first - second + third\n# elif 
self.type == FeatureType.FOUR:\n# first = sum_region(integral_image,\n# self.top_left,\n# (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)))\n# second = sum_region(integral_image,\n# (int(self.top_left[0] + self.width / 2), self.top_left[1]),\n# (self.bottom_right[0], int(self.top_left[1] + self.height / 2)))\n# third = sum_region(integral_image,\n# (self.top_left[0], int(self.top_left[1] + self.height / 2)),\n# (int(self.top_left[0] + self.width / 2), self.bottom_right[1]))\n# fourth = sum_region(integral_image,\n# (int(self.top_left[0] + self.width / 2), int(self.top_left[1] + self.height / 2)),\n# self.bottom_right)\n# score = first - second - third + fourth\n# return score\n#\n# def get_vote(self, integral_image):\n# score = self.get_score(integral_image)\n# return self.weight * (1 if score < self.polarity * self.threshold else -1)\n#\n#\n# # create feature\n# def create_features(img_height, img_width, min_feature_width,\n# max_feature_width, min_feature_height, max_feature_height):\n# features = []\n# for feature in FeatureTypes:\n# feature_start_width = max(min_feature_width, feature[0])\n# for feature_width in range(feature_start_width, max_feature_width, feature[0]):\n# feature_start_height = max(min_feature_height, feature[1])\n# for feature_height in range(feature_start_height, max_feature_height, feature[1]):\n# for x in range(img_width - feature_width):\n# for y in range(img_height - feature_height):\n# features.append(HaarLikeFeature(feature, (x, y), feature_width, feature_height, 0, 1)) ## ???\n# features.append(HaarLikeFeature(feature, (x, y), feature_width, feature_height, 0, -1))\n# return features\n#\n#\n# # get feature vote\n# def get_feature_vote(feature, image):\n# return feature.get_vote(image)\n#\n#\n# # adaboost learn\n# def learn(positive_integral_images, negative_integral_images,\n# num_classifiers=-1, min_feature_width=1,\n# max_feature_width=-1, min_feature_height=1, max_feature_height=-1):\n# num_pos = 
len(positive_integral_images)\n# num_neg = len(negative_integral_images)\n# num_imgs = num_pos + num_neg\n# img_height, img_width = positive_integral_images[0].shape\n# max_feature_height = img_height if max_feature_height == -1 else max_feature_height\n# max_feature_width = img_width if min_feature_width == -1 else max_feature_width\n# positive_weights = np.ones(num_pos) * 1.0 / (2 * num_pos) # positive_probability(1/2) * positive_sample_probability(1 / num_pos)\n# negative_weights = np.ones(num_neg) * 1.0 / (2 * num_neg)\n# weights = np.hstack((positive_weights, negative_weights))\n# labels = np.hstack((np.ones(num_pos), np.ones(num_neg) * -1))\n# images = positive_integral_images + negative_integral_images\n# features = create_features(img_height, img_width, min_feature_width,\n# max_feature_width, min_feature_height, max_feature_width)\n# num_features = len(features)\n# feature_indexs = list(range(num_features))\n# num_classifiers = num_features if num_classifiers == -1 else num_classifiers\n# votes = np.zeros((num_imgs, num_features))\n# bar = progressbar.ProgressBar()\n# pool = Pool(processes=None)\n# for i in bar(range(num_imgs)):\n# votes[i, :] = np.array(list(pool.map(partial(get_feature_vote, image=images[i]), features)))\n# classifiers = []\n# bar = progressbar.ProgressBar()\n# for _ in bar((range(num_classifiers))):\n# classification_errors = np.zeros(len(feature_indexs))\n# weights *= 1 / np.sum(weights)\n# for f in range(len(feature_indexs)):\n# f_idx = feature_indexs[f]\n# error = sum(map(lambda img_idx: weights[img_idx] if labels[img_idx] != votes[img_idx, f_idx] else 0,\n# range(num_imgs)))\n# classification_errors[f] = error\n# min_error_idx = np.argmin(classification_errors)\n# best_error = classification_errors[min_error_idx]\n# best_feature_idx = feature_indexs[min_error_idx]\n# best_feature = features[best_feature_idx]\n# feature_weight = 0.5 * np.log((1 - best_error) / best_error)\n# best_feature.weight = feature_weight\n# 
classifiers.append(best_feature)\n# weights = np.array(list(map(lambda img_idx: weights[img_idx] * np.sqrt((1-best_error)/best_error)\n# if labels[img_idx] != votes[img_idx, best_feature_idx]\n# else weights[img_idx] * np.sqrt(best_error/(1-best_error)), range(num_imgs))))\n# feature_indexs.remove(best_feature_idx)\n# return classifiers\n\n\n# version above not clear on adaboost\ndef integral_image(image):\n ii = np.zeros(image.shape)\n s = np.zeros(image.shape)\n for y in range(image.shape[0]):\n for x in range(image.shape[0]):\n s[y][x] = s[y-1][x] + image[y][x] if y - 1 >= 0 else image[y][x]\n ii[y][x] = ii[y][x-1] + s[y][x] if x - 1 >= 0 else s[y][x]\n return ii\n\n\nclass RectangleRegion:\n\n def __init__(self, x, y, width, height):\n self.x = x\n self.y = y\n self.width = width\n self.height = height\n\n def compute_feature(self, ii):\n return ii[self.y+self.height][self.x+self.width] + ii[self.y][self.x] \\\n - ii[self.y+self.height][self.x] - ii[self.y][self.x+self.width]\n\n def __str__(self):\n return \"(x= %d, y= %d, width= %d, height= %d)\" % (self.x, self.y, self.width, self.height)\n\n def __repr__(self):\n return \"RectangleRegion(%d, %d, %d, %d)\" % (self.x, self.y, self.width, self.height)\n\n\nclass WeakClassifier:\n\n def __init__(self, positive_regions, negative_regions, threshold, polarity):\n self.positive_regions = positive_regions\n self.negative_regions = negative_regions\n self.threshold = threshold\n self.polarity = polarity\n\n def classify(self, x):\n feature = lambda ii: sum([pos.compute_feature(ii) for pos in self.positive_regions])\n - sum([neg.compute_feature(ii) for neg in self.negative_regions])\n return 1 if self.polarity * feature(x) < self.polarity * self.threshold else 0\n\n def __str__(self):\n return \"Weak Clf (threshold=%d, polarity=%d, %s, %s)\" % (self.threshold,\n self.polarity,\n str(self.positive_regions),\n str(self.negative_regions))\n\n\nclass ViolaJones:\n\n def __init__(self, T=10):\n self.T = T\n self.alphas = 
[]\n self.clfs = []\n\n def build_features(self, image_shape):\n height, width = image_shape\n features = []\n for w in range(1, width+1):\n for h in range(1, height+1):\n i = 0\n while i + w < width:\n j = 0\n while j + h < height:\n immediate = RectangleRegion(i, j, w, h)\n right = RectangleRegion(i+w, j, w, h)\n if i + 2 * w < width:\n features.append(([right], [immediate]))\n bottom = RectangleRegion(i, j+h, w, h)\n if j + 2 * h < height:\n features.append(([immediate], [bottom]))\n right_2 = RectangleRegion(i+2*w, j, w, h)\n if i + 3 * w < width:\n features.append(([right], [right_2, immediate]))\n bottom_2 = RectangleRegion(i, j+2*h, w, h)\n if j + 3 * h < height:\n features.append(([bottom], [bottom_2, immediate]))\n bottom_right = RectangleRegion(i+w, j+h, w, h)\n if i + 2 * w < width and j + 2 * h < height:\n features.append(([right, bottom], [immediate, bottom_right]))\n j += 1\n i += 1\n return np.array(features)\n\n def apply_features(self, features, training_data):\n X = np.zeros((len(features), len(training_data)))\n y = np.array(list(map(lambda data: data[1], training_data)))\n i = 0\n for positive_regions, negative_regions in features:\n feature = lambda ii: sum([pos.compute_feature(ii) for pos in positive_regions]) \\\n - sum([neg.compute_feature(ii) for neg in negative_regions])\n X[i] = list(map(lambda data: feature(data[0]), training_data))\n i += 1\n return X, y\n\n def train_weak(self, X, y, features, weights):\n total_pos, total_neg = 0, 0\n for weight, label in zip(weights, y):\n if label == 1:\n total_pos += weight\n else:\n total_neg += weight\n classifiers = []\n total_features = X.shape[0]\n for index, feature in enumerate(X):\n if len(classifiers) % 1000 == 0 and len(classifiers) != 0:\n print(\"Trained %d classifiers out of %d\" % (len(classifiers), total_features))\n applied_feature = sorted(zip(weights, feature, y), key=lambda x: x[1])\n pos_seen, neg_seen = 0, 0\n pos_weights, neg_weights = 0, 0\n min_error, best_feature, 
best_threshold, best_polarity = float(\"inf\"), None, None, None\n for w, f, label in applied_feature:\n error = min(neg_weights + total_pos - pos_weights, pos_weights + total_neg - neg_weights)\n if error < min_error:\n min_error = error\n best_feature = features[index]\n best_threshold = f\n best_polarity = 1 if pos_seen > neg_seen else -1\n if label == 1:\n pos_seen += 1\n pos_weights += w\n else:\n neg_seen += 1\n neg_weights += w\n clf = WeakClassifier(best_feature[0], best_feature[1], best_threshold, best_polarity)\n classifiers.append(clf)\n return classifiers\n\n def select_best(self, classifiers, weights, training_data):\n best_clf, best_error, best_accuracy = None, float(\"inf\"), None\n for clf in classifiers:\n error, accuracy = 0, []\n for data, w in zip(training_data, weights):\n correctness = abs(clf.classify(data[0]) - data[1])\n accuracy.append(correctness)\n error += w * correctness\n error = error / len(training_data)\n if error < best_error:\n best_clf, best_error, best_accuracy = clf, error, accuracy\n return best_clf, best_error, best_accuracy\n\n def train(self, training, pos_num, neg_num):\n weights = np.zeros(len(training))\n training_data = []\n for x in range(len(training)):\n training_data.append((integral_image(training[x][0]), training[x][1]))\n if training[x][1] == 1:\n weights[x] = 1.0 / (2 * pos_num)\n else:\n weights[x] = 1.0 / (2 * neg_num)\n features = self.build_features(training_data[0][0].shape)\n X, y = self.apply_features(features, training_data)\n # optimize : pre select features to accelerate\n indices = SelectPercentile(f_classif, percentile=10).fit(X.T, y).get_support(indices=True)\n X = X[indices]\n features = features[indices]\n for t in range(self.T):\n weights = weights / np.linalg.norm(weights)\n weak_classifiers = self.train_weak(X, y, features, weights)\n clf, error, accuracy = self.select_best(weak_classifiers, weights, training_data)\n beta = error / (1 - error)\n for i in range(len(accuracy)):\n weights[i] = 
weights[i] * (beta ** (1 - accuracy[i]))\n alpha = math.log(1.0/beta)\n self.alphas.append(alpha)\n self.clfs.append(clf)\n print(\"Chose classifier: %s with accuracy: %f and alpha: %f\" %\n (str(clf), len(accuracy) - sum(accuracy), alpha))\n\n def classify(self, image):\n total = 0\n ii = integral_image(image)\n for alpha, clf in zip(self.alphas, self.clfs):\n total += alpha * clf.classify(ii)\n return 1 if total >= 0.5 * sum(alpha) else 0\n\n def save(self, file_name):\n with open(file_name+\".pkl\", \"wb\") as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load(file_name):\n with open(file_name+\".pkl\", \"rb\") as f:\n return pickle.load(f)\n\n\n# Cascade Haar\nclass CascadeClassifier():\n\n def __init__(self, layers):\n self.layers = layers\n self.clfs = []\n\n def train(self, training):\n pos, neg = [], []\n for ex in training:\n if ex[1] == 1:\n pos.append(ex)\n else:\n neg.append(ex)\n for feature_num in self.layers:\n if len(neg) == 0:\n print(\"Stopping early. FPR = 0\")\n clf = ViolaJones(T=feature_num)\n clf.train(pos+neg, len(pos), len(neg))\n self.clfs.append(clf)\n false_positives = []\n for ex in neg:\n if self.classify(ex[0]) == 1:\n false_positives.append(ex)\n neg = false_positives\n\n def classify(self, image):\n for clf in self.clfs:\n if clf.classify(image) == 0:\n return 0\n return 1\n\n def save(self, file_name):\n with open(file_name+\".pkl\", \"wb\") as f:\n pickle.dump(self, f)\n\n @staticmethod\n def load(file_name):\n with open(file_name + \".pkl\", \"rb\") as f:\n return pickle.load(f)"
] | [
[
"numpy.array",
"numpy.zeros",
"numpy.linalg.norm",
"sklearn.feature_selection.SelectPercentile"
]
] | [
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JackFurby/VGG-Concept-Bottleneck | [
"c916b29f99e0a70622ac2b5dbeb54b8cce332f70"
] | [
"CUB/dataset.py"
] | [
"\"\"\"\nGeneral utils for training, evaluation and data loading\n\"\"\"\nimport os\nimport torch\nimport pickle\nimport numpy as np\nimport torchvision.transforms as transforms\n\nfrom PIL import Image\nfrom CUB.config import BASE_DIR, N_ATTRIBUTES\nfrom torch.utils.data import BatchSampler\nfrom torch.utils.data import Dataset, DataLoader\n\n\nclass CUBDataset(Dataset):\n \"\"\"\n Returns a compatible Torch Dataset object customized for the CUB dataset\n \"\"\"\n\n def __init__(self, pkl_file_paths, use_attr, no_img, uncertain_label, image_dir, n_class_attr, transform=None):\n \"\"\"\n Arguments:\n pkl_file_paths: list of full path to all the pkl data\n use_attr: whether to load the attributes (e.g. False for simple finetune)\n no_img: whether to load the images (e.g. False for A -> Y model)\n uncertain_label: if True, use 'uncertain_attribute_label' field (i.e. label weighted by uncertainty score, e.g. 1 & 3(probably) -> 0.75)\n image_dir: default = 'images'. Will be append to the parent dir\n n_class_attr: number of classes to predict for each attribute. If 3, then make a separate class for not visible\n transform: whether to apply any special transformation. Default = None, i.e. 
use standard ImageNet preprocessing\n \"\"\"\n self.data = []\n self.is_train = any([\"train\" in path for path in pkl_file_paths])\n if not self.is_train:\n assert any([(\"test\" in path) or (\"val\" in path) for path in pkl_file_paths])\n for file_path in pkl_file_paths:\n self.data.extend(pickle.load(open(file_path, 'rb')))\n self.transform = transform\n self.use_attr = use_attr\n self.no_img = no_img\n self.uncertain_label = uncertain_label\n self.image_dir = image_dir\n self.n_class_attr = n_class_attr\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, idx):\n img_data = self.data[idx]\n img_path = os.getcwd() + \"/datasets/CUB_200_2011/data/images/\" + img_data['img_path']\n img = Image.open(img_path).convert('RGB')\n\n class_label = img_data['class_label'] - 1\n if self.transform:\n img = self.transform(img)\n\n if self.use_attr:\n if self.uncertain_label:\n attr_label = img_data['uncertain_attribute_label']\n else:\n attr_label = img_data['attribute_label']\n if self.no_img:\n if self.n_class_attr == 3:\n one_hot_attr_label = np.zeros((N_ATTRIBUTES, self.n_class_attr))\n one_hot_attr_label[np.arange(N_ATTRIBUTES), attr_label] = 1\n return one_hot_attr_label, class_label\n else:\n return attr_label, class_label\n else:\n return img, class_label, attr_label\n else:\n return img, class_label\n\n\nclass ImbalancedDatasetSampler(torch.utils.data.sampler.Sampler):\n \"\"\"Samples elements randomly from a given list of indices for imbalanced dataset\n Arguments:\n indices (list, optional): a list of indices\n num_samples (int, optional): number of samples to draw\n \"\"\"\n\n def __init__(self, dataset, indices=None):\n # if indices is not provided,\n # all elements in the dataset will be considered\n self.indices = list(range(len(dataset))) \\\n if indices is None else indices\n\n # if num_samples is not provided,\n # draw `len(indices)` samples in each iteration\n self.num_samples = len(self.indices)\n\n # distribution of classes in the 
dataset\n label_to_count = {}\n for idx in self.indices:\n label = self._get_label(dataset, idx)\n if label in label_to_count:\n label_to_count[label] += 1\n else:\n label_to_count[label] = 1\n\n # weight for each sample\n weights = [1.0 / label_to_count[self._get_label(dataset, idx)]\n for idx in self.indices]\n self.weights = torch.DoubleTensor(weights)\n\n def _get_label(self, dataset, idx): # Note: for single attribute dataset\n return dataset.data[idx]['attribute_label'][0]\n\n def __iter__(self):\n idx = (self.indices[i] for i in torch.multinomial(\n self.weights, self.num_samples, replacement=True))\n return idx\n\n def __len__(self):\n return self.num_samples\n\ndef load_data(pkl_paths, use_attr, no_img, batch_size, uncertain_label=False, n_class_attr=2, image_dir='images', resampling=False, resol=299):\n \"\"\"\n Note: Inception needs (299,299,3) images with inputs scaled between -1 and 1\n Loads data with transformations applied, and upsample the minority class if there is class imbalance and weighted loss is not used\n NOTE: resampling is customized for first attribute only, so change sampler.py if necessary\n \"\"\"\n resized_resol = int(resol * 256/224)\n is_training = any(['train.pkl' in f for f in pkl_paths])\n if is_training:\n transform = transforms.Compose([\n #transforms.Resize((resized_resol, resized_resol)),\n #transforms.RandomSizedCrop(resol),\n transforms.ColorJitter(brightness=32/255, saturation=(0.5, 1.5)),\n transforms.RandomResizedCrop(resol),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(), #implicitly divides by 255\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])\n #transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]),\n ])\n else:\n transform = transforms.Compose([\n #transforms.Resize((resized_resol, resized_resol)),\n transforms.CenterCrop(resol),\n transforms.ToTensor(), #implicitly divides by 255\n transforms.Normalize(mean = [0.5, 0.5, 0.5], std = [2, 2, 2])\n 
#transforms.Normalize(mean = [ 0.485, 0.456, 0.406 ], std = [ 0.229, 0.224, 0.225 ]),\n ])\n\n dataset = CUBDataset(pkl_paths, use_attr, no_img, uncertain_label, image_dir, n_class_attr, transform)\n if is_training:\n drop_last = True\n shuffle = True\n else:\n drop_last = False\n shuffle = False\n if resampling:\n sampler = BatchSampler(ImbalancedDatasetSampler(dataset), batch_size=batch_size, drop_last=drop_last)\n loader = DataLoader(dataset, batch_sampler=sampler)\n else:\n loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle, drop_last=drop_last)\n return loader\n\ndef find_class_imbalance(pkl_file, multiple_attr=False, attr_idx=-1):\n \"\"\"\n Calculate class imbalance ratio for binary attribute labels stored in pkl_file\n If attr_idx >= 0, then only return ratio for the corresponding attribute id\n If multiple_attr is True, then return imbalance ratio separately for each attribute. Else, calculate the overall imbalance across all attributes\n \"\"\"\n imbalance_ratio = []\n data = pickle.load(open(os.path.join(BASE_DIR, pkl_file), 'rb'))\n n = len(data)\n n_attr = len(data[0]['attribute_label'])\n if attr_idx >= 0:\n n_attr = 1\n if multiple_attr:\n n_ones = [0] * n_attr\n total = [n] * n_attr\n else:\n n_ones = [0]\n total = [n * n_attr]\n for d in data:\n labels = d['attribute_label']\n if multiple_attr:\n for i in range(n_attr):\n n_ones[i] += labels[i]\n else:\n if attr_idx >= 0:\n n_ones[0] += labels[attr_idx]\n else:\n n_ones[0] += sum(labels)\n for j in range(len(n_ones)):\n imbalance_ratio.append(total[j]/n_ones[j] - 1)\n if not multiple_attr: #e.g. [9.0] --> [9.0] * 312\n imbalance_ratio *= n_attr\n return imbalance_ratio\n"
] | [
[
"numpy.arange",
"torch.utils.data.DataLoader",
"torch.multinomial",
"torch.DoubleTensor",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Software-project-management-substation/BNN | [
"f175b8bf267d98b2c62e78922c51f7209ef912db"
] | [
"BilinearCNN/CUB_200.py"
] | [
"import os\r\nimport pickle\r\nimport numpy as np\r\nimport PIL.Image\r\n# from tqdm import tqdm\r\nimport torch.utils.data\r\n\r\nclass CUB_200(torch.utils.data.Dataset):\r\n def __init__(self, file_path, train=True, transform=None, target_transform=None):\r\n self.file_path = file_path\r\n self.train = train\r\n self.transform = transform\r\n self.target_transform = target_transform\r\n\r\n if not (os.path.isfile(os.path.join(self.file_path, 'processed/train.pkl'))\r\n and os.path.isfile(os.path.join(self.file_path, 'processed/test.pkl'))):\r\n self.process()\r\n\r\n if self.train:\r\n print('Read the training dataset...')\r\n self.train_data, self.train_labels = pickle.load(\r\n open(os.path.join(self.file_path, 'processed/train.pkl'), 'rb'))\r\n print('Read successfully!')\r\n else:\r\n print('Read the test dataset...')\r\n self.test_data, self.test_labels = pickle.load(\r\n open(os.path.join(self.file_path, 'processed/test.pkl'), 'rb'))\r\n print('Read successfully!')\r\n\r\n def __getitem__(self, index):\r\n if self.train:\r\n image, label = self.train_data[index], self.train_labels[index]\r\n else:\r\n image, label = self.test_data[index], self.test_labels[index]\r\n\r\n # Transform to PIL.Image format\r\n image = PIL.Image.fromarray(image)\r\n\r\n if self.transform is not None:\r\n image = self.transform(image)\r\n if self.target_transform is not None:\r\n label = self.target_transform(label)\r\n\r\n return image, label\r\n\r\n def __len__(self):\r\n if self.train:\r\n return len(self.train_data)\r\n else:\r\n return len(self.test_data)\r\n\r\n def process(self):\r\n image_path = os.path.join(self.file_path, 'raw/CUB_200_2011/images/')\r\n id_and_path = np.genfromtxt(os.path.join(self.file_path, 'raw/CUB_200_2011/images.txt'), dtype=str)\r\n id_and_isTrain = np.genfromtxt(os.path.join(self.file_path, 'raw/CUB_200_2011/train_test_split.txt'), dtype=int)\r\n\r\n train_data = []\r\n train_labels = []\r\n test_data = []\r\n test_labels = []\r\n print('Data 
preprocessing, storage files')\r\n # pbar = tqdm(total=len(id_and_path))\r\n for id in range(len(id_and_path)):\r\n image = PIL.Image.open(os.path.join(image_path, id_and_path[id, 1]))\r\n label = int(id_and_path[id, 1][:3]) - 1\r\n\r\n # Converts gray scale to RGB\r\n if image.getbands()[0] == 'L':\r\n image = image.convert('RGB')\r\n\r\n np_image = np.array(image)\r\n image.close()\r\n\r\n if id_and_isTrain[id, 1] == 1:\r\n train_data.append(np_image)\r\n train_labels.append(label)\r\n else:\r\n test_data.append(np_image)\r\n test_labels.append(label)\r\n # pbar.update(1)\r\n # pbar.close()\r\n\r\n # Store as a.pkl file\r\n pickle.dump((train_data, train_labels), open(os.path.join(self.file_path, 'processed/train.pkl'), 'wb'))\r\n pickle.dump((test_data, test_labels), open(os.path.join(self.file_path, 'processed/test.pkl'), 'wb'))\r\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xiulianalways/python-machineLearn | [
"ef13791d002e22fa5c4122e9faf2762d51096828"
] | [
"Perceptron01.py"
] | [
"#coding:utf-8\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom chapter01.Perceptron import Perceptron\nfrom matplotlib.colors import ListedColormap\n\n#数据下载网址:https://archive.ics.uci.edu/ml/machine-learning-databases/iris/\ndef get_flowers_feature():\n '''\n iris.data中一共包含了150条数据,包含了三种鸢尾花\n 这里我们一共使用100条数据,山鸢尾(setosa)和变色鸢尾(versicolor)\n 每一组鸢尾花数据都包含了四个特征\n 为了方便绘图只挑选其中的两个特征(花瓣的长度和萼片的长度)\n '''\n # 通过pandas读取鸢尾花数据,一定要加header=None否则不会包括第一行数据\n df = pd.read_csv(\"iris.data\",header=None)\n #获取前100组鸢尾花数据以及对应的标签\n flowers_name = df.iloc[:100,4].values\n #将花的名字转换为标签,setosa为-1,versicolor为1\n flowers_label = np.where(flowers_name==\"Iris-setosa\",-1,1)\n #选取前100组鸢尾花的第一个()和第二个特征\n flowers_feature = df.iloc[0:100,[0,2]].values\n return flowers_feature,flowers_label\n\ndef plot_flowers_distribute():\n\n flowers_feature,flowers_label = get_flowers_feature()\n #根据山鸢尾的两个特征进行绘图\n plt.scatter(flowers_feature[:50,0],flowers_feature[:50,1],\n color=\"red\",marker=\"o\",label=u\"山鸢尾\")\n #根据变色鸢尾的两个特征进行绘图\n plt.scatter(flowers_feature[50:100,0],flowers_feature[50:100,1],\n color=\"blue\",marker=\"x\",label=u\"变色鸢尾\")\n #设置x轴的标签\n plt.xlabel(u\"花瓣长度(cm)\")\n #设置y轴的标签\n plt.ylabel(u\"萼片长度(cm)\")\n #设置显示label的位置\n plt.legend(loc=\"upper left\")\n plt.show()\n\ndef iter_errors_num():\n #初始化感知器,设置感知器的学习率和迭代的次数\n perceptron = Perceptron(eta=0.1,n_iter=10)\n #获取花的特征和标签\n x,y = get_flowers_feature()\n #训练\n perceptron.fit(x,y)\n plt.plot(range(1,len(perceptron.errors_)+1),perceptron.errors_,marker=\"o\")\n plt.xlabel(\"迭代次数\")\n plt.ylabel(\"错误分类样本数量\")\n plt.show()\n\ndef plot_decision_regions(resolution=0.02):\n #定义标记符\n markers = ('s','x','o','^','v')\n #定义颜色\n colors = ('red','blue','lightgreen','gray','cyan')\n #获取花的特征和标签\n x,y = get_flowers_feature()\n perceptron = Perceptron(eta=0.1, n_iter=10)\n perceptron.fit(x,y)\n #np.unique(y)方法获取y中不重复的元素,也就只有-1和1\n #ListedColormap方法是将标记符和颜色进行对应\n #在绘图的时候红色表示正方形而蓝色表示叉\n cmap = 
ListedColormap(colors[:len(np.unique(y))])\n #获取第一个特征中最大值加1和最小值减1\n x1_min,x1_max = x[:,0].min() - 1,x[:,0].max() + 1\n #获取第二个特征中最大值加1和最小值减1\n x2_min,x2_max = x[:,1].min() - 1,x[:,1].max() + 1\n #根据特上面获取到特征的最大最小值构建一个网格坐标\n #通过模拟足够多的鸢尾花数据,来绘制出决策边界\n #resolution表示网格的大小\n '''\n 如一个2*2的网格坐标,网格大小为1,网格坐标点如下\n (0,0),(0,1),(0,2)\n (1,0),(1,1),(1,2)\n (2,0),(2,1),(2,2)\n '''\n xx1,xx2 = np.meshgrid(np.arange(x1_min,x1_max,resolution),\n np.arange(x2_min,x2_max,resolution))\n z = perceptron.predict(np.array([xx1.ravel(),xx2.ravel()]).T)\n z = z.reshape(xx1.shape)\n #绘制边界\n plt.contourf(xx1,xx2,z,alpha=0.4,cmap=cmap)\n #设置坐标的长度\n plt.xlim(xx1.min(),xx1.max())\n plt.ylim(xx2.min(),xx2.max())\n for idx,cl in enumerate(np.unique(y)):\n #在图上根据鸢尾花的特征进行绘点\n plt.scatter(x=x[y == cl,0],y=x[y == cl,1],\n alpha=0.8,c=cmap(idx),\n marker=markers[idx],label=cl)\n plt.xlabel(\"花瓣长度(cm)\")\n plt.ylabel(\"萼片长度(cm)\")\n plt.show()\n\nif __name__ == \"__main__\":\n # plot_flowers_distribute()\n # iter_errors_num()\n plot_decision_regions()\n\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.contourf",
"matplotlib.pyplot.scatter",
"numpy.unique",
"numpy.arange",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.where",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BrambleXu/japanese-company-lexicon | [
"562731b793b5ba9ac564c530f27a5c7d6c7bafe3"
] | [
"tools/convert_csv2jsonl.py"
] | [
"import os\nimport json\nimport unicodedata\nfrom typing import List\nfrom collections import OrderedDict, Counter, defaultdict\n\nimport pandas as pd\nfrom tqdm import tqdm\n\nfrom settings import ROOT_DIR\n\ndtypes = {'sequenceNumber': int, 'corporateNumber': int, 'process': int, 'correct': int, 'updateDate': str, \n 'changeDate': str, 'name': str, 'nameImageId': int, 'kind': int, 'prefectureName': str, \n 'cityName': str, 'streetNumber': str, 'addressImageId': int, 'prefectureCode': int, 'cityCode': int, \n 'postCode': int, 'addressOutside': str, 'addressOutsideImageId': int, 'closeDate': str, 'closeCause': int, \n 'successor': int, 'changeCause': str, 'assignmentDate': str, 'latest': int, 'enName': str, \n 'enPrefectureName': str, 'enCityName': str, 'enAddressOutside': str, 'furigana': str, 'hihyoji': int}\n \n# example: 227,1430001000632,01,0,2015-10-30,2015-10-05,\"株式会社エム.ジイ.エス\",,301,\"北海道\",\"札幌市白石区\",\"本通4丁目南2番1号\",,01,104,0030026,,,,,,,2015-10-05,1,,,,,,0\nkind_map = {101: \"国の機関\",\n 201: \"地方公共団体\",\n 301: \"株式会社\",\n 302: \"有限会社\",\n 303: \"合名会社\",\n 304: \"合資会社\",\n 305: \"合同会社\",\n 399: \"その他の設立登記法人\",\n 401: \"外国会社等\",\n 499: \"その他\"}\n\n\nclass Company:\n def __init__(self, dir_name: str) -> None:\n \"\"\"\n dir_name = '/.../data/hojin/csv/'\n \"\"\"\n self.dir_name = dir_name\n self.file_paths = self.get_paths()\n self.data = [] # Only save Company tyle: 株式会社,有限会社,合同会社\n self.total_data = 0 # All corporations \n self.names = defaultdict(int) # Save unique company name\n self.column_names = ['sequenceNumber', 'corporateNumber', 'process', 'correct', 'updateDate', \n 'changeDate', 'name', 'nameImageId', 'kind', 'prefectureName', 'cityName', \n 'streetNumber', 'addressImageId', 'prefectureCode', 'cityCode', 'postCode', \n 'addressOutside', 'addressOutsideImageId', 'closeDate', 'closeCause', \n 'successor', 'changeCause', 'assignmentDate', 'latest', 'enName', \n 'enPrefectureName', 'enCityName', 'enAddressOutside', 'furigana', 'hihyoji']\n \n \n def 
get_paths(self) -> List[str]:\n # Get all paths\n file_paths = []\n file_names = [f for f in os.listdir(self.dir_name) if f.endswith('.csv')]\n for file_name in file_names:\n file_paths.append(os.path.join(self.dir_name, file_name))\n return file_paths\n\n def parse_csv(self, file_paths: List[str]) -> None:\n \"\"\"Parse all csv file to get company related field\n \"\"\"\n print(\"=== Read CSV files... ===\")\n for path in tqdm(file_paths):\n df = pd.read_csv(path, names=self.column_names)\n self.total_data += df.shape[0]\n df = df[df['kind'].isin([301, 302, 305])] # Only reserve 301, 302, 305(株式会社,有限会社,合同会社)\n # df['kind'] = df['kind'].map(kind_map) # Change kind to kanji representation\n\n # Only reserve necessary columns \n df = df[['name', 'enName', 'furigana']]\n \n # Add zenkaku name, remove for concise\n # df['name_zenkaku'] = df['name']\n\n # Convert zenkaku latin to hankaku latin\n df['name'] = df['name'].apply(lambda x: unicodedata.normalize('NFKC', x)) \n # Add unique name\n for name in df['name'].values:\n self.names[name] += 1\n # Add name, enName, furigana to self.data\n hojin_list = df.to_dict('records')\n self.data += hojin_list\n print(\"=== DONE! ===\\n\")\n\n def save_jsonl(self, output_file: str) -> None:\n # keys = ['name', 'enName', 'furigana']\n print(\"=== Save companies to JSONL... ===\")\n with open(output_file, 'w') as f:\n for corporate in tqdm(self.data):\n entry = {'name': corporate['name'], 'enName': corporate['enName'], \n 'furigana': corporate['furigana']}\n json.dump(entry, f, ensure_ascii=False)\n f.write('\\n')\n print(\"=== DONE! ===\\n\")\n \n def save_names(self, output_file) -> None:\n print(\"=== Save companies names and frequency to CSV... ===\")\n with open(output_file, 'w', encoding='utf-8') as f:\n self.names = {k: v for k, v in sorted(self.names.items(), key=lambda item: item[1], reverse=True)}\n for name, value in tqdm(self.names.items()):\n f.write('{},{}\\n'.format(name, value))\n print(\"=== DONE! 
===\\n\")\n\nif __name__ == \"__main__\":\n print(\"Covnert CSV to JSONL...\")\n CSV_DIR = os.path.join(ROOT_DIR, 'data/hojin/csv/') \n OUTPUT_DIR = os.path.join(ROOT_DIR, 'data/hojin/output/') \n\n company = Company(CSV_DIR)\n file_paths = company.get_paths()\n company.parse_csv(file_paths)\n print('Total corporates are: {}'.format(company.total_data))\n print('Total companies are: {}'.format(len(company.data)))\n print('Unique companies are: {}\\n'.format(len(company.names)))\n\n output_file = os.path.join(OUTPUT_DIR, 'company.jsonl')\n company.save_jsonl(output_file)\n\n output_file = os.path.join(OUTPUT_DIR, 'company_frequency.csv')\n company.save_names(output_file)\n \n print(\"Covnert CSV to JSONL Done!\")\n "
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
adekunleba/dabl | [
"c4bfc23ba2be11763a2600c7d2a7a0059cb2251c"
] | [
"dabl/plot/utils.py"
] | [
"from warnings import warn\nfrom functools import reduce\nimport itertools\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Rectangle\nfrom seaborn.utils import despine\n\n\n# from sklearn.dummy import DummyClassifier\n# from sklearn.metrics import recall_score\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import roc_curve\n\nfrom sklearn.model_selection import cross_val_score, StratifiedShuffleSplit\n\n\nfrom ..preprocessing import detect_types\n\n\ndef find_pretty_grid(n_plots, max_cols=5):\n \"\"\"Determine a good grid shape for subplots.\n\n Tries to find a way to arange n_plots many subplots on a grid in a way\n that fills as many grid-cells as possible, while keeping the number\n of rows low and the number of columns below max_cols.\n\n Parameters\n ----------\n n_plots : int\n Number of plots to arrange.\n max_cols : int, default=5\n Maximum number of columns.\n\n Returns\n -------\n n_rows : int\n Number of rows in grid.\n n_cols : int\n Number of columns in grid.\n\n Examples\n --------\n >>> find_pretty_grid(16, 5)\n (4, 4)\n >>> find_pretty_grid(11, 5)\n (3, 4)\n >>> find_pretty_grid(10, 5)\n (2, 5)\n \"\"\"\n\n # we could probably do something with prime numbers here\n # but looks like that becomes a combinatorial problem again?\n if n_plots % max_cols == 0:\n # perfect fit!\n # if max_cols is 6 do we prefer 6x1 over 3x2?\n return int(n_plots / max_cols), max_cols\n # min number of rows needed\n min_rows = int(np.ceil(n_plots / max_cols))\n best_empty = max_cols\n best_cols = max_cols\n for cols in range(max_cols, min_rows - 1, -1):\n # we only allow getting narrower if we have more cols than rows\n remainder = (n_plots % cols)\n empty = cols - remainder if remainder != 0 else 0\n if empty == 0:\n return int(n_plots / cols), cols\n if empty < best_empty:\n best_empty = empty\n best_cols = cols\n return int(np.ceil(n_plots / best_cols)), best_cols\n\n\ndef 
plot_coefficients(coefficients, feature_names, n_top_features=10,\n classname=None, ax=None):\n \"\"\"Visualize coefficients of a linear model.\n\n Parameters\n ----------\n coefficients : nd-array, shape (n_features,)\n Model coefficients.\n\n feature_names : list or nd-array of strings, shape (n_features,)\n Feature names for labeling the coefficients.\n\n n_top_features : int, default=10\n How many features to show. The function will show the largest (most\n positive) and smallest (most negative) n_top_features coefficients,\n for a total of 2 * n_top_features coefficients.\n \"\"\"\n\n coefficients = coefficients.squeeze()\n feature_names = np.asarray(feature_names)\n if coefficients.ndim > 1:\n # this is not a row or column vector\n raise ValueError(\"coefficients must be 1d array or column vector, got\"\n \" shape {}\".format(coefficients.shape))\n coefficients = coefficients.ravel()\n\n if len(coefficients) != len(feature_names):\n raise ValueError(\"Number of coefficients {} doesn't match number of\"\n \"feature names {}.\".format(len(coefficients),\n len(feature_names)))\n # get coefficients with large absolute values\n coef = coefficients.ravel()\n mask = coef != 0\n coef = coef[mask]\n feature_names = feature_names[mask]\n # FIXME this could be easier with pandas by sorting by a column\n interesting_coefficients = np.argsort(np.abs(coef))[-n_top_features:]\n new_inds = np.argsort(coef[interesting_coefficients])\n interesting_coefficients = interesting_coefficients[new_inds]\n # plot them\n if ax is None:\n plt.figure(figsize=(len(interesting_coefficients), 5))\n ax = plt.gca()\n colors = ['red' if c < 0 else 'blue'\n for c in coef[interesting_coefficients]]\n ax.bar(np.arange(len(interesting_coefficients)),\n coef[interesting_coefficients],\n color=colors)\n feature_names = np.array(feature_names)\n ax.set_xticks(np.arange(0, len(interesting_coefficients)))\n ax.set_xticklabels(feature_names[interesting_coefficients],\n rotation=60, ha=\"right\")\n 
_short_tick_names(ax, ticklabel_length=20)\n ax.set_ylabel(\"Coefficient magnitude\")\n ax.set_xlabel(\"Feature\")\n ax.set_title(classname)\n return feature_names[interesting_coefficients]\n\n\ndef heatmap(values, xlabel, ylabel, xticklabels, yticklabels, cmap=None,\n vmin=None, vmax=None, ax=None, fmt=\"%0.2f\", origin='lower'):\n if ax is None:\n ax = plt.gca()\n img = ax.pcolor(values, cmap=cmap, vmin=vmin, vmax=vmax)\n img.update_scalarmappable()\n ax.set_xlabel(_shortname(xlabel, maxlen=40))\n ax.set_ylabel(_shortname(ylabel, maxlen=40))\n ax.set_xticks(np.arange(len(xticklabels)) + .5)\n ax.set_yticks(np.arange(len(yticklabels)) + .5)\n xticklabels = [_shortname(label, maxlen=40) for label in xticklabels]\n yticklabels = [_shortname(label, maxlen=40) for label in yticklabels]\n ax.set_xticklabels(xticklabels)\n ax.set_yticklabels(yticklabels)\n ax.set_aspect(1)\n if origin == 'upper':\n ylim = ax.get_ylim()\n ax.set_ylim(ylim[::-1])\n\n for p, color, value in zip(img.get_paths(), img.get_facecolors(),\n img.get_array()):\n x, y = p.vertices[:-2, :].mean(0)\n if np.mean(color[:3]) > 0.5:\n c = 'k'\n else:\n c = 'w'\n ax.text(x, y, fmt % value, color=c, ha=\"center\", va=\"center\")\n return img\n\n\ndef _shortname(some_string, maxlen=20):\n \"\"\"Shorten a string given a maximum length.\n\n Longer strings will be shortened and the rest replaced by ...\n\n Parameters\n ----------\n some_string : string\n Input string to shorten\n maxlen : int, default=20\n\n Returns\n -------\n return_string : string\n Output string of size ``min(len(some_string), maxlen)``.\n \"\"\"\n some_string = str(some_string)\n if len(some_string) > maxlen:\n return some_string[:maxlen - 3] + \"...\"\n else:\n return some_string\n\n\ndef mosaic_plot(data, rows, cols, vary_lightness=False, ax=None):\n \"\"\"Create a mosaic plot from a dataframe.\n\n Right now only horizontal mosaic plots are supported,\n i.e. 
rows are prioritized over columns.\n\n Parameters\n ----------\n data : pandas data frame\n Data to tabulate.\n rows : column specifier\n Column in data to tabulate across rows.\n cols : column specifier\n Column in data to use to subpartition rows.\n vary_lightness : bool, default=False\n Whether to vary lightness across categories.\n ax : matplotlib axes or None\n Axes to plot into.\n\n Examples\n --------\n >>> from dabl.datasets import load_titanic\n >>> data = load_titanic()\n >>> mosaic_plot(data, 'sex', 'survived')\n \"\"\"\n\n cont = pd.crosstab(data[cols], data[rows])\n sort = np.argsort((cont / cont.sum()).iloc[0])\n cont = cont.iloc[:, sort]\n if ax is None:\n ax = plt.gca()\n pos_y = 0\n positions_y = []\n n_cols = cont.shape[1]\n for i, col in enumerate(cont.columns):\n height = cont[col].sum()\n positions_y.append(pos_y + height / 2)\n\n pos_x = 0\n for j, row in enumerate(cont[col]):\n width = row / height\n color = plt.cm.tab10(j)\n if vary_lightness:\n color = _lighten_color(color, (i + 1) / (n_cols + 1))\n rect = Rectangle((pos_x, pos_y), width, height, edgecolor='k',\n facecolor=color)\n pos_x += width\n ax.add_patch(rect)\n pos_y += height\n\n ax.set_ylim(0, pos_y)\n ax.set_yticks(positions_y)\n ax.set_yticklabels(cont.columns)\n\n\ndef _lighten_color(color, amount=0.5):\n \"\"\"\n Lightens the given color by multiplying (1-luminosity) by the given amount.\n Input can be matplotlib color string, hex string, or RGB tuple.\n\n https://stackoverflow.com/questions/37765197/darken-or-lighten-a-color-in-matplotlib\n\n Examples:\n >> lighten_color('g', 0.3)\n >> lighten_color('#F034A3', 0.6)\n >> lighten_color((.3,.55,.1), 0.5)\n \"\"\"\n import matplotlib.colors as mc\n import colorsys\n c = color\n amount += 0.5\n c = colorsys.rgb_to_hls(*mc.to_rgb(c))\n return colorsys.hls_to_rgb(c[0], 1 - amount * (1 - c[1]), c[2])\n\n\ndef _get_n_top(features, name):\n if features.shape[1] > 20:\n print(\"Showing only top 10 of {} {} features\".format(\n 
features.shape[1], name))\n # too many features, show just top 10\n show_top = 10\n else:\n show_top = features.shape[1]\n return show_top\n\n\ndef _prune_categories(series, max_categories=10):\n series = series.astype('category')\n small_categories = series.value_counts()[max_categories:].index\n res = series.cat.remove_categories(small_categories)\n res = res.cat.add_categories(['dabl_other']).fillna(\"dabl_other\")\n return res\n\n\ndef _prune_category_make_X(X, col, target_col, max_categories=20):\n col_values = X[col]\n if col_values.nunique() > max_categories:\n # keep only top 10 categories if there are more than 20\n col_values = _prune_categories(col_values,\n max_categories=min(10, max_categories))\n X_new = X[[target_col]].copy()\n X_new[col] = col_values\n else:\n X_new = X.copy()\n X_new[col] = X_new[col].astype('category')\n return X_new\n\n\ndef _fill_missing_categorical(X):\n # fill in missing values in categorical variables with new category\n # ensure we use strings for object columns and number for integers\n X = X.copy()\n max_value = X.max(numeric_only=True).max()\n for col in X.columns:\n if X[col].dtype == 'object':\n X[col].fillna(\"dabl_missing\", inplace=True)\n else:\n X[col].fillna(max_value + 1, inplace=True)\n return X\n\n\ndef _make_subplots(n_plots, max_cols=5, row_height=3):\n \"\"\"Create a harmonious subplot grid.\n \"\"\"\n n_rows, n_cols = find_pretty_grid(n_plots, max_cols=max_cols)\n fig, axes = plt.subplots(n_rows, n_cols,\n figsize=(4 * n_cols, row_height * n_rows),\n constrained_layout=True)\n # we don't want ravel to fail, this is awkward!\n axes = np.atleast_2d(axes)\n return fig, axes\n\n\ndef _check_X_target_col(X, target_col, types=None, type_hints=None, task=None):\n if types is None:\n types = detect_types(X, type_hints=type_hints)\n if (not isinstance(target_col, str) and hasattr(target_col, '__len__') and\n len(target_col) > 1):\n raise ValueError(\"target_col should be a column in X, \"\n \"got 
{}\".format(target_col))\n if target_col not in X.columns:\n raise ValueError(\"{} is not a valid column of X\".format(target_col))\n\n if X[target_col].nunique() < 2:\n raise ValueError(\"Less than two classes present, {}, need at least two\"\n \" for classification.\".format(X.loc[0, target_col]))\n # FIXME we get target types here with detect_types,\n # but in the estimator with type_of_target\n if task == \"classification\" and not types.loc[target_col, 'categorical']:\n raise ValueError(\"Type for target column {} detected as {},\"\n \" need categorical for classification.\".format(\n target_col, types.T.idxmax()[target_col]))\n if task == \"regression\" and (not types.loc[target_col, 'continuous']):\n raise ValueError(\"Type for target column {} detected as {},\"\n \" need continuous for regression.\".format(\n target_col, types.T.idxmax()[target_col]))\n return types\n\n\ndef _short_tick_names(ax, label_length=20, ticklabel_length=10):\n \"\"\"Shorten axes labels and tick labels.\n\n Uses _shortname to change labels as a side effect.\n\n Parameters\n ----------\n ax : matplotlib axes\n Axes on which to shorten labels.\n label_length : int, default=20\n Length of xlabel and ylabel\n ticklabel_length : int, default=10\n Length of each label in xticklabels and yticklabels\n \"\"\"\n ax.set_xticklabels(\n [_shortname(t.get_text(), maxlen=ticklabel_length)\n for t in ax.get_xticklabels()]\n )\n ax.set_yticklabels(\n [_shortname(t.get_text(), maxlen=ticklabel_length)\n for t in ax.get_yticklabels()]\n )\n ax.set_xlabel(_shortname(ax.get_xlabel(), maxlen=label_length))\n ax.set_ylabel(_shortname(ax.get_ylabel(), maxlen=label_length))\n\n\ndef _find_scatter_plots_classification(X, target, how_many=3,\n random_state=None):\n # input is continuous\n # look at all pairs of features, find most promising ones\n # dummy = DummyClassifier(strategy='prior').fit(X, target)\n # baseline_score = recall_score(target, dummy.predict(X), average='macro')\n scores = []\n # 
converting to int here might save some time\n _, target = np.unique(target, return_inverse=True)\n # limit to 2000 training points for speed?\n train_size = min(2000, int(.9 * X.shape[0]))\n cv = StratifiedShuffleSplit(n_splits=3, train_size=train_size,\n random_state=random_state)\n for i, j in itertools.combinations(np.arange(X.shape[1]), 2):\n this_X = X[:, [i, j]]\n # assume this tree is simple enough so not be able to overfit in 2d\n # so we don't bother with train/test split\n tree = DecisionTreeClassifier(max_leaf_nodes=8)\n scores.append((i, j, np.mean(cross_val_score(\n tree, this_X, target, cv=cv, scoring='recall_macro'))))\n\n scores = pd.DataFrame(scores, columns=['feature0', 'feature1', 'score'])\n top = scores.sort_values(by='score').iloc[-how_many:][::-1]\n return top\n\n\ndef discrete_scatter(x, y, c, unique_c=None, legend='first',\n clip_outliers=True,\n alpha='auto', s='auto', ax=None, **kwargs):\n \"\"\"Scatter plot for categories.\n\n Creates a scatter plot for x and y grouped by c.\n\n\n Parameters\n ----------\n x : array-like\n x coordinates to scatter.\n y : array-like\n y coordinates to scatter.\n c : array-like\n Grouping of samples (similar to hue in seaborn).\n unique_c : array-like, default='None'\n Unique values of c considered in scatter. If not\n provided unique elements of c are determined.\n legend : bool, or \"first\", default=\"first\"\n Whether to create a legend. \"first\" mean only the\n first one in a given gridspec.\n clip_outliers : bool, default='True'\n Whether to clip outliers in x and y. The limits are\n determined based on 0.01 and 0.99 quantiles of x and\n y ignoring nan values.\n alpha : float, default='auto'\n Alpha values for scatter plots. 'auto' is dirty hacks.\n s : float, default='auto'.\n Marker size for scatter plots. 
'auto' is dirty hacks.\n ax : matplotlib axes, default=None\n Axes to plot into.\n kwargs :\n Passed through to plt.scatter.\n\n Examples\n --------\n >>> import matplotlib.pyplot as plt\n >>> from dabl.datasets import load_ames\n >>> data = load_ames()\n >>> fig = plt.figure()\n >>> discrete_scatter(\n ... x=data[\"Year Built\"],\n ... y=data[\"SalePrice\"],\n ... c=data[\"Overall Qual\"],\n ... unique_c=[2, 4, 6, 8, 10],\n ... legend=True,\n ... alpha=0.3\n ... )\n \"\"\"\n alpha = _get_scatter_alpha(alpha, x)\n s = _get_scatter_size(s, x)\n if ax is None:\n ax = plt.gca()\n if legend == \"first\":\n legend = (ax.get_geometry()[2] == 1)\n if unique_c is None:\n unique_c = np.unique(c)\n for i in unique_c:\n mask = c == i\n ax.scatter(x[mask], y[mask], label=i, s=s, alpha=alpha, **kwargs)\n if clip_outliers:\n x_low, x_high = _inlier_range(x)\n y_low, y_high = _inlier_range(y)\n xlims = ax.get_xlim()\n ylims = ax.get_ylim()\n ax.set_xlim(max(x_low, xlims[0]), min(x_high, xlims[1]))\n ax.set_ylim(max(y_low, ylims[0]), min(y_high, ylims[1]))\n\n if legend:\n props = {}\n if len(unique_c) > 15:\n props['size'] = 6\n legend = ax.legend(prop=props)\n for handle in legend.legendHandles:\n handle.set_alpha(1)\n handle.set_sizes((100,))\n\n\ndef class_hists(data, column, target, bins=\"auto\", ax=None, legend=False,\n scale_separately=True):\n \"\"\"Grouped univariate histograms.\n\n Parameters\n ----------\n data : pandas DataFrame\n Input data to plot.\n column : column specifier\n Column in the data to compute histograms over (must be continuous).\n target : column specifier\n Target column in data, must be categorical.\n bins : string, int or array-like\n Number of bins, 'auto' or bin edges. 
Passed to np.histogram_bin_edges.\n We always show at least 5 bins for now.\n ax : matplotlib axes\n Axes to plot into.\n legend : boolean, default=False\n Whether to create a legend.\n scale_separately : boolean, default=True\n Whether to scale each class separately.\n\n Examples\n --------\n >>> from dabl.datasets import load_adult\n >>> data = load_adult()\n >>> class_hists(data, \"age\", \"gender\", legend=True)\n <matplotlib...\n \"\"\"\n col_data = data[column].dropna()\n\n if ax is None:\n ax = plt.gca()\n if col_data.nunique() > 10:\n ordinal = False\n # histograms\n bin_edges = np.histogram_bin_edges(col_data, bins=bins)\n if len(bin_edges) > 30:\n bin_edges = np.histogram_bin_edges(col_data, bins=30)\n\n counts = {}\n for name, group in data.groupby(target)[column]:\n this_counts, _ = np.histogram(group, bins=bin_edges)\n counts[name] = this_counts\n counts = pd.DataFrame(counts)\n else:\n ordinal = True\n # ordinal data, count distinct values\n counts = data.groupby(target)[column].value_counts().unstack(target)\n if scale_separately:\n # normalize by maximum\n counts = counts / counts.max()\n bottom = counts.max().max() * 1.1\n for i, name in enumerate(counts.columns):\n if ordinal:\n ax.bar(range(counts.shape[0]), counts[name], width=.9,\n bottom=bottom * i, tick_label=counts.index, linewidth=2,\n edgecolor='k', label=name)\n xmin, xmax = 0 - .5, counts.shape[0] - .5\n else:\n ax.bar(bin_edges[:-1], counts[name], bottom=bottom * i, label=name,\n align='edge', width=(bin_edges[1] - bin_edges[0]) * .9)\n xmin, xmax = bin_edges[0], bin_edges[-1]\n ax.hlines(bottom * i, xmin=xmin, xmax=xmax,\n linewidth=1)\n if legend:\n ax.legend()\n ax.set_yticks(())\n ax.set_xlabel(_shortname(column))\n return ax\n\n\ndef pairplot(data, target_col, columns=None, scatter_alpha='auto',\n scatter_size='auto'):\n \"\"\"Pairplot (scattermatrix)\n\n Because there's already too many implementations of this.\n This is meant for classification only.\n This is very bare-bones 
right now :-/\n\n Parameters\n ----------\n data : pandas dataframe\n Input data\n target_col : column specifier\n Target column in data.\n columns : column specifiers, default=None.\n Columns in data to include. None means all.\n scatter_alpha : float, default='auto'\n Alpha values for scatter plots. 'auto' is dirty hacks.\n scatter_size : float, default='auto'.\n Marker size for scatter plots. 'auto' is dirty hacks.\n \"\"\"\n if columns is None:\n columns = data.columns.drop(target_col)\n n_features = len(columns)\n fig, axes = plt.subplots(n_features, n_features,\n figsize=(n_features * 3, n_features * 3))\n axes = np.atleast_2d(axes)\n for ax, (i, j) in zip(axes.ravel(),\n itertools.product(range(n_features), repeat=2)):\n legend = i == 0 and j == n_features - 1\n if i == j:\n class_hists(data, columns[i], target_col, ax=ax.twinx())\n else:\n discrete_scatter(data[columns[j]], data[columns[i]],\n c=data[target_col], legend=legend, ax=ax,\n alpha=scatter_alpha,\n s=scatter_size)\n if j == 0:\n ax.set_ylabel(columns[i])\n else:\n ax.set_ylabel(\"\")\n ax.set_yticklabels(())\n if i == n_features - 1:\n ax.set_xlabel(_shortname(columns[j]))\n else:\n ax.set_xlabel(\"\")\n ax.set_xticklabels(())\n despine(fig)\n if n_features > 1:\n axes[0, 0].set_yticks(axes[0, 1].get_yticks())\n axes[0, 0].set_ylim(axes[0, 1].get_ylim())\n return axes\n\n\ndef _inlier_range(series):\n low = np.nanquantile(series, 0.01)\n high = np.nanquantile(series, 0.99)\n assert low <= high\n # the two is a complete hack\n inner_range = (high - low) / 2\n return low - inner_range, high + inner_range\n\n\ndef _find_inliers(series):\n low, high = _inlier_range(series)\n mask = series.between(low, high)\n mask = mask | series.isna()\n dropped = len(mask) - mask.sum()\n if dropped > 0:\n warn(\"Dropped {} outliers in column {}.\".format(\n int(dropped), series.name), UserWarning)\n return mask\n\n\ndef _clean_outliers(data):\n def _find_outliers_series(series):\n series = series.dropna()\n low = 
series.quantile(0.01)\n high = series.quantile(0.99)\n # the two is a complete hack\n inner_range = (high - low) / 2\n return series.between(low - inner_range, high + inner_range)\n mask = data.apply(_find_outliers_series)\n mask = mask.apply(lambda x: reduce(np.logical_and, x), axis=1).fillna(True)\n dropped = len(mask) - mask.sum()\n if dropped > 0:\n warn(\"Dropped {} outliers.\".format(int(dropped)), UserWarning)\n return mask\n return None\n\n\ndef _get_scatter_alpha(scatter_alpha, x):\n if scatter_alpha != \"auto\":\n return scatter_alpha\n if x.shape[0] < 100:\n return .9\n elif x.shape[0] < 1000:\n return .5\n elif x.shape[0] < 10000:\n return .2\n else:\n return .1\n\n\ndef _get_scatter_size(scatter_size, x):\n if scatter_size != \"auto\":\n return scatter_size\n if x.shape[0] < 100:\n return 30\n elif x.shape[0] < 1000:\n return 30\n elif x.shape[0] < 2000:\n return 10\n elif x.shape[0] < 10000:\n return 2\n else:\n return 1\n\n\ndef plot_multiclass_roc_curve(estimator, X_val, y_val):\n if len(estimator.classes_) < 3:\n raise ValueError(\"Only for multi-class\")\n try:\n y_score = estimator.predict_proba(X_val)\n except AttributeError:\n y_score = estimator.decision_function(X_val)\n fig, axes = _make_subplots(len(estimator.classes_))\n for i, (ax, c) in enumerate(zip(axes.ravel(), estimator.classes_)):\n fpr, tpr, _ = roc_curve(y_val == c, y_score[:, i])\n ax.plot(fpr, tpr)\n ax.set_xlabel(\"False Positive Rate\")\n ax.set_ylabel(\"True Positive Rate (recall)\")\n ax.set_title(\"ROC curve for class {}\".format(c))\n"
] | [
[
"numpy.asarray",
"pandas.DataFrame",
"sklearn.tree.DecisionTreeClassifier",
"numpy.mean",
"numpy.histogram",
"pandas.crosstab",
"matplotlib.pyplot.gca",
"matplotlib.colors.to_rgb",
"numpy.unique",
"numpy.arange",
"numpy.ceil",
"matplotlib.pyplot.cm.tab10",
"sklearn.model_selection.StratifiedShuffleSplit",
"matplotlib.patches.Rectangle",
"sklearn.metrics.roc_curve",
"numpy.atleast_2d",
"numpy.histogram_bin_edges",
"numpy.argsort",
"numpy.array",
"sklearn.model_selection.cross_val_score",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.nanquantile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
zhouzhibo0117/apollo | [
"f7a2369315a2bf106e473ced7392f4542f4ff3a3"
] | [
"modules/tools/plot_planning/imu_acc.py"
] | [
"#!/usr/bin/env python3\n\n###############################################################################\n# Copyright 2019 The Apollo Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n###############################################################################\n\nimport math\nfrom record_reader import RecordItemReader\n\n\nclass ImuAcc:\n\n def __init__(self):\n self.timestamp_list = []\n self.corrected_acc_list = []\n self.acc_list = []\n\n self.last_corrected_acc = None\n self.last_timestamp = None\n\n def add(self, location_est):\n timestamp = location_est.measurement_time\n acc = location_est.pose.linear_acceleration.x * \\\n math.cos(location_est.pose.heading) + \\\n location_est.pose.linear_acceleration.y * \\\n math.sin(location_est.pose.heading)\n\n if self.last_corrected_acc is not None:\n corrected_acc = self._correct_acc(acc, self.last_corrected_acc)\n else:\n corrected_acc = acc\n\n self.acc_list.append(acc)\n self.corrected_acc_list.append(corrected_acc)\n self.timestamp_list.append(timestamp)\n\n self.last_timestamp = timestamp\n self.last_corrected_acc = corrected_acc\n\n def get_acc_list(self):\n return self.acc_list\n\n def get_corrected_acc_list(self):\n return self.corrected_acc_list\n\n def get_timestamp_list(self):\n return self.timestamp_list\n\n def get_lastest_corrected_acc(self):\n if len(self.corrected_acc_list) > 0:\n return self.corrected_acc_list[-1]\n else:\n return None\n\n def 
get_lastest_acc(self):\n if len(self.acc_list) > 0:\n return self.acc_list[-1]\n else:\n return None\n\n def get_lastest_timestamp(self):\n if len(self.timestamp_list) > 0:\n return self.timestamp_list[-1]\n else:\n return None\n\n def _correct_acc(self, acc, last_acc):\n if last_acc is None:\n return last_acc\n delta = abs(acc - last_acc) / abs(last_acc)\n if delta > 0.4:\n corrected = acc / 2.0\n return corrected\n else:\n return acc\n\n\nif __name__ == \"__main__\":\n import sys\n import matplotlib.pyplot as plt\n from os import listdir\n from os.path import isfile, join\n\n folders = sys.argv[1:]\n fig, ax = plt.subplots()\n colors = [\"g\", \"b\", \"r\", \"m\", \"y\"]\n markers = [\"o\", \"o\", \"o\", \"o\"]\n for i in range(len(folders)):\n folder = folders[i]\n color = colors[i % len(colors)]\n marker = markers[i % len(markers)]\n fns = [f for f in listdir(folder) if isfile(join(folder, f))]\n for fn in fns:\n reader = RecordItemReader(folder+\"/\"+fn)\n processor = ImuAcc()\n last_pose_data = None\n last_chassis_data = None\n topics = [\"/apollo/localization/pose\"]\n for data in reader.read(topics):\n if \"pose\" in data:\n last_pose_data = data[\"pose\"]\n processor.add(last_pose_data)\n last_pose_data = None\n last_chassis_data = None\n\n data_x = processor.get_timestamp_list()\n data_y = processor.get_corrected_acc_list()\n ax.scatter(data_x, data_y, c=color, marker=marker, alpha=0.4)\n data_y = processor.get_acc_list()\n ax.scatter(data_x, data_y, c=\"k\", marker=\"+\", alpha=0.4)\n\n plt.show()\n"
] | [
[
"matplotlib.pyplot.show",
"matplotlib.pyplot.subplots"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
eavise-kul/lightnet | [
"d2d5d3fff8f929c3683c34f176217649375b98e1"
] | [
"test/test_networks.py"
] | [
"#\n# Test if network forward function runs\n# Copyright EAVISE\n#\n\nimport inspect\nimport pytest\nimport torch\nimport lightnet as ln\n\nclassification_networks = ['Darknet', 'Darknet19', 'Darknet53', 'MobileDarknet19', 'MobilenetV1', 'MobilenetV2']\nanchor_detection_networks = ['DYolo', 'MobilenetYolo', 'MobileYoloV2', 'MobileYoloV2Upsample', 'TinyYoloV2', 'TinyYoloV3', 'YoloV2', 'YoloV2Upsample', 'YoloV3', 'Yolt']\ncorner_detection_networks = ['Cornernet', 'CornernetSqueeze']\nspecial_networks = ['YoloFusion']\n\n\[email protected](scope='module')\ndef input_tensor():\n def _input_tensor(dimension, channels=3, batch=1):\n return torch.rand(batch, channels, dimension, dimension)\n return _input_tensor\n\n\n# Base classification networks\[email protected]('network', classification_networks)\ndef test_classification_cpu(network, input_tensor):\n uut = getattr(ln.models, network)(1000).eval()\n it = input_tensor(uut.inner_stride)\n\n output_tensor = uut(it)\n assert output_tensor.dim() == 2\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] == uut.num_classes\n\n\[email protected]('network', classification_networks)\[email protected]\[email protected](not torch.cuda.is_available(), reason='CUDA not available')\ndef test_classification_cuda(network, input_tensor):\n uut = getattr(ln.models, network)(1000).to('cuda')\n it = input_tensor(uut.inner_stride, batch=2).to('cuda')\n\n output_tensor = uut(it)\n assert output_tensor.dim() == 2\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] == uut.num_classes\n\n\n# Anchor detection networks\[email protected]('network', anchor_detection_networks)\ndef test_anchor_detection_cpu(network, input_tensor):\n uut = getattr(ln.models, network)(20).eval()\n it = input_tensor(uut.inner_stride)\n\n output_tensor = uut(it)\n if isinstance(output_tensor, torch.Tensor):\n assert output_tensor.dim() == 4\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] 
== len(uut.anchors) * (5 + uut.num_classes)\n assert output_tensor.shape[2] == it.shape[2] // uut.stride\n assert output_tensor.shape[3] == it.shape[3] // uut.stride\n else:\n for i, tensor in enumerate(output_tensor):\n assert tensor.dim() == 4\n assert tensor.shape[0] == it.shape[0]\n assert tensor.shape[1] == len(uut.anchors[i]) * (5 + uut.num_classes)\n assert tensor.shape[2] == it.shape[2] // uut.stride[i]\n assert tensor.shape[3] == it.shape[3] // uut.stride[i]\n\n\[email protected]('network', anchor_detection_networks)\[email protected]\[email protected](not torch.cuda.is_available(), reason='CUDA not available')\ndef test_anchor_detection_cuda(network, input_tensor):\n uut = getattr(ln.models, network)(20).to('cuda')\n it = input_tensor(uut.inner_stride, batch=2).to('cuda')\n\n output_tensor = uut(it)\n if isinstance(output_tensor, torch.Tensor):\n assert output_tensor.dim() == 4\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] == len(uut.anchors) * (5 + uut.num_classes)\n assert output_tensor.shape[2] == it.shape[2] // uut.stride\n assert output_tensor.shape[3] == it.shape[3] // uut.stride\n else:\n for i, tensor in enumerate(output_tensor):\n assert tensor.dim() == 4\n assert tensor.shape[0] == it.shape[0]\n assert tensor.shape[1] == len(uut.anchors[i]) * (5 + uut.num_classes)\n assert tensor.shape[2] == it.shape[2] // uut.stride[i]\n assert tensor.shape[3] == it.shape[3] // uut.stride[i]\n\n\n# Corner detection networks\[email protected]('network', corner_detection_networks)\ndef test_corner_detection_cpu(network, input_tensor):\n uut = getattr(ln.models, network)(20).eval()\n it = input_tensor(uut.inner_stride)\n\n output_tensor = uut(it)\n if isinstance(output_tensor, torch.Tensor):\n assert output_tensor.dim() == 4\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] == (uut.num_classes + 3) * 2\n assert output_tensor.shape[2] == it.shape[2] // uut.stride\n assert output_tensor.shape[3] == 
it.shape[3] // uut.stride\n else:\n for tensor in output_tensor:\n assert tensor.dim() == 4\n assert tensor.shape[0] == it.shape[0]\n assert tensor.shape[1] == (uut.num_classes + 3) * 2\n assert tensor.shape[2] == it.shape[2] // uut.stride\n assert tensor.shape[3] == it.shape[3] // uut.stride\n\n\[email protected]('network', corner_detection_networks)\[email protected]\[email protected](not torch.cuda.is_available(), reason='CUDA not available')\ndef test_corner_detection_cuda(network, input_tensor):\n uut = getattr(ln.models, network)(20).to('cuda')\n it = input_tensor(uut.inner_stride, batch=2).to('cuda')\n\n output_tensor = uut(it)\n if isinstance(output_tensor, torch.Tensor):\n assert output_tensor.dim() == 4\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] == (uut.num_classes + 3) * 2\n assert output_tensor.shape[2] == it.shape[2] // uut.stride\n assert output_tensor.shape[3] == it.shape[3] // uut.stride\n else:\n for tensor in output_tensor:\n assert tensor.dim() == 4\n assert tensor.shape[0] == it.shape[0]\n assert tensor.shape[1] == (uut.num_classes + 3) * 2\n assert tensor.shape[2] == it.shape[2] // uut.stride\n assert tensor.shape[3] == it.shape[3] // uut.stride\n\n\n# YoloFusion\ndef test_yolofusion_cpu(input_tensor):\n it = input_tensor(ln.models.YoloFusion.inner_stride, 4)\n\n for fusion in (0, 1, 10, 22, 27):\n uut = ln.models.YoloFusion(20, fuse_layer=fusion).eval()\n output_tensor = uut(it)\n assert output_tensor.dim() == 4\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] == len(uut.anchors) * (5 + uut.num_classes)\n assert output_tensor.shape[2] == it.shape[2] // uut.stride\n assert output_tensor.shape[3] == it.shape[3] // uut.stride\n\n\[email protected]\[email protected](not torch.cuda.is_available(), reason='CUDA not available')\ndef test_yolofusion_cuda(input_tensor):\n it = input_tensor(ln.models.YoloFusion.inner_stride * 2, 4).to('cuda')\n\n for fusion in (0, 1, 10, 22, 27):\n uut = 
ln.models.YoloFusion(20, fuse_layer=fusion).to('cuda')\n output_tensor = uut(it)\n assert output_tensor.dim() == 4\n assert output_tensor.shape[0] == it.shape[0]\n assert output_tensor.shape[1] == len(uut.anchors) * (5 + uut.num_classes)\n assert output_tensor.shape[2] == it.shape[2] // uut.stride\n assert output_tensor.shape[3] == it.shape[3] // uut.stride\n\n\n# All networks tested?\ndef test_all_networks_tested():\n networks = [\n net for net in dir(ln.models)\n if (inspect.isclass(getattr(ln.models, net)))\n and (issubclass(getattr(ln.models, net), torch.nn.Module))\n ]\n\n tested_networks = set(\n anchor_detection_networks\n + corner_detection_networks\n + classification_networks\n + special_networks\n )\n for net in networks:\n if net not in tested_networks:\n raise NotImplementedError(f'Network [{net}] is not being tested!')\n"
] | [
[
"torch.rand",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zacharymartinot/ssht_numba | [
"12eb0130bd1debf22ea1b13d1f1eec1b1f4ca11c"
] | [
"tests/test_numba_wrappers.py"
] | [
"\"\"\"\nTests the numba wrapper functions against the equivalent pyssht functions\nusing random input data.\n\"\"\"\n\nimport numpy as np\nimport pyssht\n\nimport ssht_numba as sshtn\n\nL = 180\ns = -1\n\n\ndef test_everything():\n # Test indexing functions\n ind2elm_check = [pyssht.ind2elm(i) == sshtn.ind2elm(i) for i in range(L * L)]\n\n assert all(ind2elm_check), \"ind2elm functions do not match\"\n\n elm2ind_check = [\n pyssht.elm2ind(el, m) == sshtn.elm2ind(el, m)\n for el in range(L)\n for m in range(-el, el)\n ]\n\n assert all(elm2ind_check), \"elm2ind functions do not match\"\n\n assert pyssht.sample_shape(L, Method=\"MW\") == sshtn.mw_sample_shape(L)\n assert pyssht.sample_shape(L, Method=\"MWSS\") == sshtn.mwss_sample_shape(L)\n\n py_theta, py_phi = pyssht.sample_positions(L, Method=\"MW\", Grid=False)\n nb_theta, nb_phi = sshtn.mw_sample_positions(L)\n assert np.allclose(py_theta, nb_theta)\n assert np.allclose(py_phi, nb_phi)\n\n py_theta, py_phi = pyssht.sample_positions(L, Method=\"MWSS\", Grid=False)\n nb_theta, nb_phi = sshtn.mwss_sample_positions(L)\n\n assert np.allclose(py_theta, nb_theta)\n assert np.allclose(py_phi, nb_phi)\n\n py_ttheta, py_pphi = pyssht.sample_positions(L, Method=\"MW\", Grid=True)\n nb_ttheta, nb_pphi = sshtn.mw_sample_grid(L)\n assert np.allclose(py_ttheta, nb_ttheta)\n assert np.allclose(py_pphi, nb_pphi)\n\n py_ttheta, py_pphi = pyssht.sample_positions(L, Method=\"MWSS\", Grid=True)\n nb_ttheta, nb_pphi = sshtn.mwss_sample_grid(L)\n assert np.allclose(py_ttheta, nb_ttheta)\n assert np.allclose(py_pphi, nb_pphi)\n\n # Generate random flms (of complex signal).\n np.random.seed(89834)\n flm = np.random.randn(L * L) + 1j * np.random.randn(L * L)\n\n # Zero harmonic coefficients with el<|spin|.\n ind_min = np.abs(s) ** 2\n flm[0:ind_min] = 0.0 + 1j * 0.0\n\n # MW inverse complex transform\n f_py_mw = pyssht.inverse(flm, L, Spin=s, Method=\"MW\")\n\n f_nb_mw = np.empty(sshtn.mw_sample_shape(L), dtype=np.complex128)\n 
sshtn.mw_inverse_sov_sym(flm, L, s, f_nb_mw)\n\n assert np.allclose(f_py_mw, f_nb_mw)\n\n # MW forward complex transform, recovering input\n rec_flm_py_mw = pyssht.forward(f_py_mw, L, Spin=s, Method=\"MW\")\n\n rec_flm_nb_mw = np.empty(L * L, dtype=np.complex128)\n sshtn.mw_forward_sov_conv_sym(f_nb_mw, L, s, rec_flm_nb_mw)\n\n assert np.allclose(rec_flm_py_mw, rec_flm_nb_mw)\n assert np.allclose(rec_flm_nb_mw, flm)\n\n # MW forward real transform\n\n f_re = np.random.randn(*sshtn.mw_sample_shape(L))\n\n flm_py_re_mw = pyssht.forward(f_re, L, Spin=0, Method=\"MW\", Reality=True)\n\n flm_nb_re_mw = np.empty(L * L, dtype=np.complex128)\n sshtn.mw_forward_sov_conv_sym_real(f_re, L, flm_nb_re_mw)\n\n assert np.allclose(flm_py_re_mw, flm_nb_re_mw)\n\n # MW inverse real transform\n rec_f_re_py = pyssht.inverse(flm_py_re_mw, L, Spin=0, Method=\"MW\", Reality=True)\n\n rec_f_re_nb = np.empty(sshtn.mw_sample_shape(L), dtype=np.float64)\n sshtn.mw_inverse_sov_sym_real(flm_nb_re_mw, L, rec_f_re_nb)\n\n assert np.allclose(rec_f_re_py, rec_f_re_nb)\n # Note that rec_f_re_{py,nb} != f_re since f_re is not band-limited at L\n\n # MWSS invserse complex transform\n f_py_mwss = pyssht.inverse(flm, L, Spin=s, Method=\"MWSS\", Reality=False)\n\n f_nb_mwss = np.empty(sshtn.mwss_sample_shape(L), dtype=np.complex128)\n sshtn.mw_inverse_sov_sym_ss(flm, L, s, f_nb_mwss)\n\n assert np.allclose(f_py_mwss, f_nb_mwss)\n\n # MWSS forward complex transform\n rec_flm_py_mwss = pyssht.forward(f_py_mwss, L, Spin=s, Method=\"MWSS\", Reality=False)\n\n rec_flm_nb_mwss = np.empty(L * L, dtype=np.complex128)\n sshtn.mw_forward_sov_conv_sym_ss(f_nb_mwss, L, s, rec_flm_nb_mwss)\n\n assert np.allclose(rec_flm_py_mwss, rec_flm_nb_mwss)\n assert np.allclose(rec_flm_nb_mwss, flm)\n\n # MWSS forward real transform\n\n f_re2 = np.random.randn(*sshtn.mwss_sample_shape(L))\n\n flm_py_re_mwss = pyssht.forward(f_re2, L, Spin=0, Method=\"MWSS\", Reality=True)\n\n flm_nb_re_mwss = np.empty(L * L, 
dtype=np.complex128)\n sshtn.mw_forward_sov_conv_sym_ss_real(f_re2, L, flm_nb_re_mwss)\n\n assert np.allclose(flm_py_re_mwss, flm_nb_re_mwss)\n\n # MWSS inverse real transform\n\n rec_f_re_py_mwss = pyssht.inverse(\n flm_py_re_mwss, L, Spin=0, Method=\"MWSS\", Reality=True\n )\n\n rec_f_re_nb_mwss = np.empty(sshtn.mwss_sample_shape(L), dtype=np.float64)\n sshtn.mw_inverse_sov_sym_ss_real(flm_nb_re_mwss, L, rec_f_re_nb_mwss)\n\n assert np.allclose(rec_f_re_py_mwss, rec_f_re_nb_mwss)\n\n assert np.allclose(\n pyssht.generate_dl(np.pi / 2, 10), sshtn.generate_dl(np.pi / 2, 10)\n )\n"
] | [
[
"numpy.abs",
"numpy.allclose",
"numpy.random.seed",
"numpy.random.randn",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bingrao/advance_optimization | [
"cb3706bdd28714ed1ccb28951c6b9d8f6b58045a"
] | [
"max-coverage/src/tools/algorithms.py"
] | [
"import random\r\nimport numpy as np\r\n\r\nfrom utlis.data import get_mask\r\nfrom utlis.data import get_binary_array_with_sum\r\nfrom utlis.data import convert_numpy\r\n\r\n\r\n# nums: the numbers of sub-set selected\r\n# data: the input data\r\ndef random_coverage(nums, data):\r\n cover = []\r\n covered = set()\r\n for i in range(nums):\r\n # Get a sub-set from input data randomly\r\n r = data.pop(random.randint(0, len(data)-1))\r\n\r\n # Append selected sub-set elements into the cover\r\n # array, the repeat element will be removed.\r\n cover.append(r)\r\n\r\n # |= is to | as += is to +, i.e. a combination of\r\n # operation and asignment.\r\n covered |= r\r\n return cover, covered\r\n\r\n\r\n# nums: the numbers of sub-set selected\r\n# data: the input data\r\ndef greedy_max_coverage(nums, data):\r\n covered = set()\r\n cover = []\r\n for i in range(nums):\r\n max_subset = max(data, key=lambda x: len(x - covered))\r\n cover.append(max_subset)\r\n covered |= max_subset\r\n return cover, covered\r\n\r\n\r\n# nums: the numbers of sub-set selected\r\n# data: the input data, should be a list, rather a set\r\ndef pipage_rounding(nums, data):\r\n data = convert_numpy(data)\r\n universe = np.unique(data) # A 1D numpy array k*1\r\n mask = get_mask(data,universe)\r\n\r\n # m: the total number of unique elements in [[data]]\r\n m = universe.shape[0]\r\n # n: the total number of elements in data\r\n n = data.shape[0]\r\n # x_vector = np.random.randint(2, size=n)\r\n\r\n # x: A fractional x in [0,1]^n, where n is len(data)\r\n def function(x):\r\n total = 0\r\n for i in range(0, m):\r\n mul = 1\r\n for j in range(0, n):\r\n if mask[j, i]:\r\n mul = mul * (1 - x[j])\r\n total += (1 - mul)\r\n return total\r\n\r\n # x: A fractional x in [0,1]^n, where n is len(data)\r\n # p: A polytope, that is P = {x in [0,1]^n, 1 =< j <= n: sum x_j = k} where n is len(data)\r\n # f: A object function F(x) to evaluate F(x), F(x+ad), F(x-bd).\r\n def run(f):\r\n possible_solution = 
np.array(get_binary_array_with_sum(n,nums))\r\n nums_solutions = possible_solution.shape[0] % n\r\n\r\n x = possible_solution[np.random.randint(0, nums_solutions)]\r\n p = np.random.randint(0, nums_solutions)\r\n e_p = possible_solution[p]\r\n q = np.random.randint(0, nums_solutions)\r\n e_q = possible_solution[q]\r\n\r\n # x = np.pad(np.ones(nums, dtype=int), (0, n - nums), 'constant', constant_values=0)\r\n # np.random.shuffle(x)\r\n for i in range(0, n):\r\n # e_p = np.pad(np.ones(nums, dtype=int), (0, n - nums), 'constant', constant_values=0)\r\n # e_q = np.pad(np.ones(nums, dtype=int), (0, n - nums), 'constant', constant_values=0)\r\n # np.random.shuffle(e_p)\r\n # np.random.shuffle(e_q)\r\n d_x = e_p - e_q\r\n alpha_x = min(1 - x[p], x[q])\r\n beta_x = min(1 - x[q], x[p])\r\n x1 = x + alpha_x * d_x\r\n x2 = x - beta_x * d_x\r\n if f(x1) >= f(x):\r\n x = x1\r\n else:\r\n x = x2\r\n return x\r\n return run(function)\r\n\r\n# # nums: the numbers of sub-set selected\r\n# # # data: the input data, should be a list, rather a set\r\n# # def pipage_rounding(nums, data):\r\n# # # x: A fractional x in [0,1]^n, where n is len(data)\r\n# # # p: A polytope, that is P = {x in [0,1]^n, 1 =< j <= n: sum x_j = k} where n is len(data)\r\n# # # f: A object function F(x) to evaluate F(x), F(x+ad), F(x-bd).\r\n# # def run(x, f):\r\n# # b = np.random.randint(2, size=(n, 2*n))\r\n# # matriod = Matroid(Matrix(GF(2), b))\r\n# # bases = matriod.bases()\r\n# # length = len(bases)\r\n# # a, b = np.random.randint(0, length, 2)\r\n# # if a == b:\r\n# # b = (a + b) % length\r\n# # p = np.array(Matrix(bases[a]) % 2)\r\n# # q = np.array(Matrix(bases[b]) % 2)\r\n# # d_x = p - q\r\n# # alpha_x = min(1-x[a], x[b])\r\n# # beta_x = min(1-x[b], x[a])\r\n# # for i in range(0, n):\r\n# # x1 = x + alpha_x * d_x\r\n# # x2 = x - beta_x * d_x\r\n# # if f(x1) >= f(x):\r\n# # x = x1\r\n# # else:\r\n# # x = x2\r\n# #\r\n# # return x\r\n# #\r\n# # # x: A fractional x in [0,1]^n, where n is len(data)\r\n# # 
def function(x):\r\n# # total = 0\r\n# # for i in range(0, m):\r\n# # ele = universe[i]\r\n# # mul = 1\r\n# # for j in range(0,n):\r\n# # ele_set = data[j]\r\n# # if ele in ele_set:\r\n# # mul **= (1 - x[j])\r\n# # total += (1 - mul)\r\n# # return total\r\n# #\r\n# # universe = list(set(itertools.chain(*data)))\r\n# # # m: the total number of unique elements in [[data]]\r\n# # m = len(universe)\r\n# # # n: the total number of elements in data\r\n# # n = len(data)\r\n# # x_vector = np.random.randint(2, size=n)\r\n# # return run(x_vector, function)\r\n\r\n"
] | [
[
"numpy.random.randint",
"numpy.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cedricconol/texthero | [
"b73ef44911205cdb19b9b60c9d40eba54989c494"
] | [
"tests/test_representation.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom texthero import representation\nfrom texthero import preprocessing\n\nfrom . import PandasTestCase\n\nimport doctest\nimport unittest\nimport string\nimport math\nimport warnings\n\n\"\"\"\nTest doctest\n\"\"\"\n\n\ndef load_tests(loader, tests, ignore):\n tests.addTests(doctest.DocTestSuite(representation))\n return tests\n\n\nclass TestRepresentation(PandasTestCase):\n \"\"\"\n Count.\n \"\"\"\n\n def test_count_single_document(self):\n s = pd.Series(\"a b c c\")\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[1, 1, 2]])\n self.assertEqual(representation.count(s), s_true)\n\n def test_count_multiple_documents(self):\n s = pd.Series([\"doc_one\", \"doc_two\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[1, 0], [0, 1]])\n self.assertEqual(representation.count(s), s_true)\n\n def test_count_not_lowercase(self):\n s = pd.Series([\"one ONE\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[1, 1]])\n self.assertEqual(representation.count(s), s_true)\n\n def test_count_punctuation_are_kept(self):\n s = pd.Series([\"one !\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[1, 1]])\n self.assertEqual(representation.count(s), s_true)\n\n def test_count_not_tokenized_yet(self):\n s = pd.Series(\"a b c c\")\n s_true = pd.Series([[1, 1, 2]])\n\n with warnings.catch_warnings(): # avoid print warning\n warnings.simplefilter(\"ignore\")\n self.assertEqual(representation.count(s), s_true)\n\n with self.assertWarns(DeprecationWarning): # check raise warning\n representation.count(s)\n\n \"\"\"\n TF-IDF\n \"\"\"\n\n def test_tfidf_formula(self):\n s = pd.Series([\"Hi Bye\", \"Test Bye Bye\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series(\n [\n [\n 1.0 * (math.log(3 / 3) + 1),\n 1.0 * (math.log(3 / 2) + 1),\n 0.0 * (math.log(3 / 2) + 1),\n ],\n [\n 2.0 * (math.log(3 / 3) + 1),\n 0.0 * (math.log(3 / 2) + 1),\n 1.0 * (math.log(3 / 2) + 1),\n ],\n ]\n )\n s_true.rename_axis(\"document\", 
inplace=True)\n self.assertEqual(representation.tfidf(s), s_true)\n\n def test_tfidf_single_document(self):\n s = pd.Series(\"a\", index=[\"yo\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[1]], index=[\"yo\"])\n s_true.rename_axis(\"document\", inplace=True)\n self.assertEqual(representation.tfidf(s), s_true)\n\n def test_tfidf_not_tokenized_yet(self):\n s = pd.Series(\"a\")\n s_true = pd.Series([[1]])\n s_true.rename_axis(\"document\", inplace=True)\n\n with warnings.catch_warnings(): # avoid print warning\n warnings.simplefilter(\"ignore\")\n self.assertEqual(representation.tfidf(s), s_true)\n\n with self.assertWarns(DeprecationWarning): # check raise warning\n representation.tfidf(s)\n\n def test_tfidf_single_not_lowercase(self):\n s = pd.Series(\"ONE one\")\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[1.0, 1.0]])\n s_true.rename_axis(\"document\", inplace=True)\n self.assertEqual(representation.tfidf(s), s_true)\n\n \"\"\"\n Term Frequency\n \"\"\"\n\n def test_term_frequency_single_document(self):\n s = pd.Series(\"a b c c\")\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[0.25, 0.25, 0.5]])\n self.assertEqual(representation.term_frequency(s), s_true)\n\n def test_term_frequency_multiple_documents(self):\n s = pd.Series([\"doc_one\", \"doc_two\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[0.5, 0.0], [0.0, 0.5]])\n self.assertEqual(representation.term_frequency(s), s_true)\n\n def test_term_frequency_not_lowercase(self):\n s = pd.Series([\"one ONE\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[0.5, 0.5]])\n self.assertEqual(representation.term_frequency(s), s_true)\n\n def test_term_frequency_punctuation_are_kept(self):\n s = pd.Series([\"one !\"])\n s = preprocessing.tokenize(s)\n s_true = pd.Series([[0.5, 0.5]])\n self.assertEqual(representation.term_frequency(s), s_true)\n\n def test_term_frequency_not_tokenized_yet(self):\n s = pd.Series(\"a b c c\")\n s_true = pd.Series([[0.25, 0.25, 0.5]])\n\n with 
warnings.catch_warnings(): # avoid print warning\n warnings.simplefilter(\"ignore\")\n self.assertEqual(representation.term_frequency(s), s_true)\n\n with self.assertWarns(DeprecationWarning): # check raise warning\n representation.term_frequency(s)\n"
] | [
[
"pandas.Series"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
awacha/mdscripts | [
"831bda06557fa2d5f0899fc2f6552c9e49146cef"
] | [
"src/mdscripts/extract_energy.py"
] | [
"#!/usr/bin/env python\n\nimport os\nimport re\nimport subprocess\nimport sys\nimport tempfile\n\nimport numpy as np\nimport scipy.signal\nfrom PyQt5 import QtCore, QtWidgets\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg, NavigationToolbar2QT\nfrom matplotlib.figure import Figure\n\nfrom .extract_energy_ui import Ui_gmx_extract_energy\n\n\nclass CurvesModel(QtCore.QAbstractItemModel):\n # columns: name, show in left axis, show in right axis,\n def __init__(self, labels):\n super().__init__()\n self._rows = [{'name': l, 'showonleft': True, 'showonright': False, 'factor': 1.0} for l in labels]\n\n def columnCount(self, parent=None):\n return 4\n\n def data(self, index: QtCore.QModelIndex, role=None):\n row = self._rows[index.row()]\n if index.column() == 0:\n if role == QtCore.Qt.DisplayRole:\n return row['name']\n else:\n return None\n elif index.column() == 1:\n if role == QtCore.Qt.CheckStateRole:\n return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][row['showonleft']]\n elif index.column() == 2:\n if role == QtCore.Qt.CheckStateRole:\n return [QtCore.Qt.Unchecked, QtCore.Qt.Checked][row['showonright']]\n elif index.column() == 3:\n if role == QtCore.Qt.DisplayRole:\n return '{:g}'.format(row['factor'])\n else:\n return None\n\n def flags(self, index: QtCore.QModelIndex):\n row = self._rows[index.row()]\n if index.column() == 0:\n return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable\n if index.column() == 1 or index.column() == 2:\n return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsUserCheckable | QtCore.Qt.ItemIsSelectable\n if index.column() == 3:\n return QtCore.Qt.ItemIsEditable | QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable\n else:\n return QtCore.Qt.NoItemFlags\n\n\n def index(self, row, col, parent=None):\n return self.createIndex(row, col, None)\n\n def rowCount(self, parent=None):\n return len(self._rows)\n\n def parent(self, 
index: QtCore.QModelIndex = None):\n return QtCore.QModelIndex()\n\n def setData(self, index: QtCore.QModelIndex, newvalue, role=None):\n row = self._rows[index.row()]\n if index.column() == 1:\n row['showonleft'] = newvalue == QtCore.Qt.Checked\n self.dataChanged.emit(self.index(index.row(), 1), self.index(index.row(), 1))\n return True\n elif index.column() == 2:\n row['showonright'] = newvalue == QtCore.Qt.Checked\n self.dataChanged.emit(self.index(index.row(), 2), self.index(index.row(), 2))\n return True\n return False\n\n def headerData(self, column, orientation, role=None):\n if orientation != QtCore.Qt.Horizontal:\n return None\n if role == QtCore.Qt.DisplayRole:\n return ['Name', 'Left', 'Right', 'Scaling factor'][column]\n\n def showOnLeft(self, row):\n return self._rows[row]['showonleft']\n\n def showOnRight(self, row):\n return self._rows[row]['showonright']\n\n def factor(self, row):\n return self._rows[row]['factor']\n\n def hideAll(self):\n for r in self._rows:\n r['showonleft'] = r['showonright'] = False\n self.dataChanged.emit(self.index(0, 1), self.index(self.rowCount(), 2))\n\n\nclass StatisticsModel(QtCore.QAbstractItemModel):\n # columns: name, mean, median, trend, std, std (pcnt), ptp, ptp (pcnt),\n def __init__(self, data, labels, tmin=None, tmax=None):\n super().__init__()\n self.data = data\n self.labels = labels\n if tmin is None:\n tmin = data[:, 0].min()\n if tmax is None:\n tmax = data[:, 0].max()\n self.tmin = tmin\n self.tmax = tmax\n\n def columnCount(self, parent=None):\n return 8\n\n def data(self, index: QtCore.QModelIndex, role=None):\n datacolumn = index.row() + 1\n dataidx = np.logical_and(self.data[:, 0] >= self.tmin, self.data[:, 0] <= self.tmax)\n data = self.data[dataidx, datacolumn]\n if role != QtCore.Qt.DisplayRole:\n return None\n if index.column() == 0:\n return self.labels[datacolumn]\n elif index.column() == 1:\n return str(np.mean(data))\n elif index.column() == 2:\n return str(np.median(data))\n elif index.column() 
== 3:\n coeffs = np.polyfit(self.data[dataidx, 0], data, 1)\n return str(coeffs[0])\n elif index.column() == 4:\n return str(np.std(data))\n elif index.column() == 5:\n return str(np.std(data) / np.mean(data) * 100)\n elif index.column() == 6:\n return str(np.ptp(data))\n elif index.column() == 7:\n return str(np.ptp(data) / np.mean(data) * 100)\n else:\n return None\n\n def flags(self, index: QtCore.QModelIndex):\n return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemNeverHasChildren | QtCore.Qt.ItemIsSelectable\n\n def index(self, row, col, parent=None):\n return self.createIndex(row, col, None)\n\n def rowCount(self, parent=None):\n return self.data.shape[1] - 1\n\n def parent(self, index: QtCore.QModelIndex = None):\n return QtCore.QModelIndex()\n\n def headerData(self, column, orientation, role=None):\n if orientation != QtCore.Qt.Horizontal:\n return None\n if role == QtCore.Qt.DisplayRole:\n return ['Name', 'Mean', 'Median', 'Trend', 'STD', 'STD %', 'P2P', 'P2P %'][column]\n\n def setTmin(self, value):\n self.tmin = value\n self.dataChanged.emit(self.index(0, 1), self.index(self.rowCount(), self.columnCount()),\n [QtCore.Qt.DisplayRole])\n\n def setTmax(self, value):\n self.tmax = value\n self.dataChanged.emit(self.index(0, 1), self.index(self.rowCount(), self.columnCount()),\n [QtCore.Qt.DisplayRole])\n\n\nclass MainWindow(QtWidgets.QWidget, Ui_gmx_extract_energy):\n windowfunctions = {'barthann': 'Bartlett-Hann',\n 'bartlett': 'Bartlett',\n 'blackman': 'Blackman',\n 'blackmanharris': 'Blackman-Harris',\n 'bohman': 'Bohman',\n 'boxcar': 'Rectangular',\n 'cosine': 'Cosine',\n 'flattop': 'Flat top',\n 'hamming': 'Hamming',\n 'hann': 'Hann',\n 'nuttall': 'Nuttall',\n 'parzen': 'Parzen',\n 'triang': 'Triangular',\n 'tukey': 'Tukey (tapered cosine)',\n }\n\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n self.cursor = None\n self.setupUi(self)\n\n def setupUi(self, Form):\n Ui_gmx_extract_energy.setupUi(self, Form)\n Form.fsmodel = 
QtWidgets.QFileSystemModel()\n Form.treeViewOpenFile.setModel(Form.fsmodel)\n Form.fsmodel.setNameFilters(['*.edr'])\n Form.fsmodel.setRootPath('/')\n Form.treeViewOpenFile.hideColumn(1)\n Form.treeViewOpenFile.hideColumn(2)\n Form.treeViewOpenFile.hideColumn(3)\n Form.fsmodel.sort(0, QtCore.Qt.AscendingOrder)\n Form.treeViewOpenFile.expand(Form.fsmodel.index(os.getcwd()))\n Form.treeViewOpenFile.setCurrentIndex(Form.fsmodel.index(os.getcwd()))\n Form.treeViewOpenFile.scrollTo(Form.fsmodel.index(os.getcwd()), QtWidgets.QAbstractItemView.PositionAtTop)\n Form.treeViewOpenFile.activated.connect(Form.onFileSelected)\n Form.figure = Figure()\n Form.figureCanvas = FigureCanvasQTAgg(Form.figure)\n Form.verticalLayoutFigure.addWidget(Form.figureCanvas)\n Form.navigationToolBar = NavigationToolbar2QT(Form.figureCanvas, Form)\n Form.verticalLayoutFigure.addWidget(Form.navigationToolBar)\n Form.hideAllPushButton.clicked.connect(Form.hideAll)\n Form.toolButtonGoFirst.clicked.connect(\n lambda: Form.horizontalSliderCursor.triggerAction(Form.horizontalSliderCursor.SliderToMinimum))\n Form.toolButtonGoLast.clicked.connect(\n lambda: Form.horizontalSliderCursor.triggerAction(Form.horizontalSliderCursor.SliderToMaximum))\n Form.toolButtonGoNext.clicked.connect(\n lambda: Form.horizontalSliderCursor.triggerAction(Form.horizontalSliderCursor.SliderSingleStepAdd))\n Form.toolButtonGoPrevious.clicked.connect(\n lambda: Form.horizontalSliderCursor.triggerAction(Form.horizontalSliderCursor.SliderSingleStepSub))\n Form.tminSlider.valueChanged.connect(Form.onTminSliderValueChanged)\n Form.tmaxSlider.valueChanged.connect(Form.onTmaxSliderValueChanged)\n Form.smoothingSlider.valueChanged.connect(Form.onSmoothingChanged)\n index = 0\n for i, w in enumerate(sorted(self.windowfunctions)):\n Form.smoothingFunctionComboBox.addItem(self.windowfunctions[w])\n if w == 'boxcar':\n index = i\n Form.smoothingFunctionComboBox.setCurrentIndex(index)\n 
Form.smoothingFunctionComboBox.currentIndexChanged.connect(lambda *args: self.replot())\n\n def onSmoothingChanged(self, smoothing):\n self.replot()\n\n def onFileSelected(self, index):\n assert isinstance(self.fsmodel, QtWidgets.QFileSystemModel)\n filename = self.fsmodel.filePath(index)\n self.openFile(filename)\n\n def openFile(self, filename):\n self.setCurveData(*extract_energy(filename))\n\n def hideAll(self):\n try:\n self.curveModel.hideAll()\n except AttributeError:\n pass\n\n def setCurveData(self, data, labels):\n self.data = data\n self.labels = labels\n this_is_the_first_model = not hasattr(self, 'curveModel')\n self.curveModel = CurvesModel(self.labels[1:])\n self.treeViewCurves.setModel(self.curveModel)\n self.curveModel.dataChanged.connect(self.curveModelDataChanged)\n self.statModel = StatisticsModel(self.data, self.labels)\n self.statisticsTreeView.setModel(self.statModel)\n if this_is_the_first_model:\n for col in range(1, self.curveModel.columnCount()):\n self.treeViewCurves.resizeColumnToContents(col)\n for col in range(1, self.statModel.columnCount()):\n self.statisticsTreeView.resizeColumnToContents(col)\n self.setScalerLimits()\n self.replot()\n\n def setScalerLimits(self):\n self.horizontalSliderCursor.setMinimum(0)\n self.horizontalSliderCursor.setMaximum(self.data.shape[0] - 1)\n self.tminSlider.setMinimum(0)\n self.tminSlider.setMaximum(self.data.shape[0] - 1)\n self.tmaxSlider.setMinimum(0)\n self.tmaxSlider.setMaximum(self.data.shape[0] - 1)\n self.tmaxSlider.setValue(self.data.shape[0] - 1)\n self.tminSlider.setValue(0)\n self.tminSpinBox.setMinimum(self.data[0, 0])\n self.tminSpinBox.setMaximum(self.data[-1, 0])\n self.tmaxSpinBox.setMinimum(self.data[0, 0])\n self.tmaxSpinBox.setMaximum(self.data[-1, 0])\n self.smoothingSlider.setMinimum(0)\n self.smoothingSlider.setMaximum(int(np.floor(0.5 * (self.data.shape[0] - 1))))\n\n def onTminSliderValueChanged(self, value):\n if value > self.tmaxSlider.value():\n 
self.tminSlider.setValue(self.tmaxSlider.value())\n self.tminSpinBox.setValue(self.cursorposToTime(self.tminSlider.value()))\n self.statModel.setTmin(self.cursorposToTime(self.tminSlider.value()))\n\n def cursorposToTime(self, cursorpos):\n return self.data[:, 0][cursorpos]\n\n def timeToCursorpos(self, time):\n return np.searchsorted(self.data[:, 0], time, side='right')\n\n def onTmaxSliderValueChanged(self, value):\n if value < self.tminSlider.value():\n self.tmaxSlider.setValue(self.tminSlider.value())\n self.tmaxSpinBox.setValue(self.cursorposToTime(self.tmaxSlider.value()))\n self.statModel.setTmax(self.cursorposToTime(self.tmaxSlider.value()))\n\n def curveModelDataChanged(self, idx1, idx2, roles):\n self.replot()\n\n def smoothingWindowName(self):\n return [k for k in self.windowfunctions\n if self.windowfunctions[k] == self.smoothingFunctionComboBox.currentText()][0]\n\n\n def replot(self):\n smoothing = 2 * self.smoothingSlider.value() + 1\n if smoothing < 3:\n smoothing = None\n assert isinstance(self.figure, Figure)\n self.figure.clear()\n axesleft = self.figure.add_subplot(1, 1, 1)\n axesleft.set_xlabel(self.labels[0])\n axesright = axesleft.twinx()\n lines = []\n labels = []\n for i in range(1, len(self.labels)):\n if smoothing is not None:\n window = scipy.signal.get_window(self.smoothingWindowName(), smoothing)\n curve = scipy.signal.fftconvolve(self.data[:, i], window, 'valid') / window.sum()\n # smoothing = 2*n+1. 
Cut n points from both the left and the right side of x.\n n = (smoothing - 1) // 2\n x = self.data[n:-n, 0]\n else:\n curve = self.data[:, i]\n x = self.data[:, 0]\n if self.curveModel.showOnLeft(i - 1):\n lines.append(axesleft.plot(x, curve, label=self.labels[i])[0])\n labels.append(self.labels[i])\n if self.curveModel.showOnRight(i - 1):\n lines.append(axesright.plot(x, curve, label=self.labels[i])[0])\n labels.append(self.labels[i])\n self.figure.legend(lines, labels)\n self.figureCanvas.draw()\n\n\ndef extract_energy(edrfile, structurefile=None, outputfile=None):\n handle = None\n if outputfile is None:\n handle, outputfile = tempfile.mkstemp('.xvg')\n os.close(handle)\n popenargs = ['gmx', 'energy', '-o', outputfile, '-f', edrfile]\n if structurefile is not None:\n popenargs.extend(['-s', structurefile])\n gmx_energy = subprocess.Popen(popenargs, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE,\n universal_newlines=True)\n try:\n outs, errs = gmx_energy.communicate('\\n'.join([str(x) for x in range(1, 100)]), timeout=200)\n except subprocess.TimeoutExpired:\n gmx_energy.kill()\n outs, errs = gmx_energy.communicate()\n raise RuntimeError('Timeout in \"gmx energy\"')\n assert gmx_energy.returncode is not None\n if gmx_energy.returncode:\n print(errs)\n raise RuntimeError('Nonzero exit code from \"gmx energy\": {}'.format(gmx_energy.returncode))\n data, labels = read_xvg(outputfile)\n if handle is not None:\n os.unlink(outputfile)\n return data, labels\n\n\ndef read_xvg(filename):\n data = []\n labels = ['Time (ps)']\n with open(filename, 'rt', encoding='utf-8') as f:\n while f:\n l = f.readline()\n if not l:\n break\n if l.startswith('#'):\n continue\n m = re.match('^@ s(\\d+) legend \"(?P<legend>.*)\"$', l)\n if m:\n labels.append(m.group('legend'))\n continue\n if l.startswith('@'):\n continue\n data.append([float(x) for x in l.split()])\n return np.array(data), labels\n\n\ndef run():\n import argparse\n parser = 
argparse.ArgumentParser(description=\"Extract all curves from a gromacs .edr file\")\n parser.add_argument('-f', action='store', type=str, help='.edr file', default='energy.edr')\n parser.add_argument('-o', action='store', type=str, help='output file', default='energy.xvg')\n parser.add_argument('-w', action='store_const', const=True, help='View results', default=False)\n parser.add_argument('-s', action='store', nargs=\"?\", required=False, type=str,\n help='.tpr file', const='topol.tpr', default=None)\n args = vars(parser.parse_args())\n\n data, labels = extract_energy(args['f'], args['s'], args['o'])\n if args[\"w\"]:\n from PyQt5.QtWidgets import QApplication\n app = QApplication([sys.argv[0]])\n mw = MainWindow()\n mw.setCurveData(data, labels)\n mw.show()\n app.exec_()\n"
] | [
[
"numpy.polyfit",
"matplotlib.figure.Figure",
"numpy.median",
"numpy.ptp",
"matplotlib.backends.backend_qt5agg.NavigationToolbar2QT",
"numpy.std",
"numpy.mean",
"numpy.searchsorted",
"numpy.floor",
"matplotlib.backends.backend_qt5agg.FigureCanvasQTAgg",
"numpy.array",
"numpy.logical_and"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jdossgollin/2018-paraguay-floods | [
"75a001e01f7cc7a725a0daafe2749d26ace71a93"
] | [
"src/get/download_mjo.py"
] | [
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"Download raw MJO data from\nhttp://www.bom.gov.au/climate/mjo/graphics/rmm.74toRealtime.txt\nand parse it\n\"\"\"\n\nimport argparse\nimport os\nfrom datetime import datetime\nimport pandas as pd\nimport xarray as xr\n\nparser = argparse.ArgumentParser() # pylint: disable=C0103\nparser.add_argument(\"--syear\", help=\"the first year to retain\")\nparser.add_argument(\"--eyear\", help=\"the last year to retain\")\nparser.add_argument(\"--outfile\", help=\"the path to the raw MJO data\")\nparser.add_argument(\"--infile\", help=\"the filename of the data to save\")\n\n\ndef download_data(sdate, edate, infile, outfile):\n \"\"\"Download the MJO data\"\"\"\n col_names = [\"year\", \"month\", \"day\", \"RMM1\", \"RMM2\", \"phase\", \"amplitude\", \"source\"]\n mjo_df = pd.read_table(\n infile, delim_whitespace=True, index_col=None, skiprows=2, names=col_names\n )\n mjo_df[\"time\"] = pd.to_datetime(mjo_df[[\"year\", \"month\", \"day\"]])\n mjo_df.set_index(\"time\", inplace=True)\n mjo_df = mjo_df[[\"RMM1\", \"RMM2\", \"phase\", \"amplitude\"]]\n mjo_df = mjo_df.loc[sdate:edate]\n mjo_ds = mjo_df.to_xarray()\n\n # save to file\n if os.path.isfile(outfile):\n os.remove(outfile)\n mjo_ds.to_netcdf(outfile, format=\"NETCDF4\", mode=\"w\")\n\n\ndef main():\n \"\"\"Parse the command line arguments and run download_data().\n \"\"\"\n args = parser.parse_args()\n outfile = os.path.abspath(args.outfile)\n infile = os.path.abspath(args.infile)\n sdate = datetime(int(args.syear), 1, 1)\n edate = datetime(int(args.eyear), 12, 31)\n download_data(sdate=sdate, edate=edate, infile=infile, outfile=outfile)\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.read_table",
"pandas.to_datetime"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
joshcombes/pyquil | [
"d61b4a086c622eea21e1cfcaccacfd6a9096d5bf"
] | [
"pyquil/tests/test_quantum_computer.py"
] | [
"import itertools\n\nimport networkx as nx\nimport numpy as np\nimport pytest\n\nfrom pyquil import Program, get_qc, list_quantum_computers\nfrom pyquil.api import QVM, QuantumComputer, local_qvm\nfrom pyquil.api._quantum_computer import _get_flipped_protoquil_program, _parse_name, \\\n _get_qvm_with_topology\nfrom pyquil.device import NxDevice, gates_in_isa\nfrom pyquil.gates import *\nfrom pyquil.quilbase import Declare, MemoryReference\nfrom pyquil.noise import decoherence_noise_with_asymmetric_ro\nfrom pyquil.pyqvm import PyQVM\nfrom pyquil.tests.utils import DummyCompiler\nfrom rpcq.messages import ParameterAref, PyQuilExecutableResponse\n\n\ndef test_get_flipped_program():\n program = Program([\n I(0),\n RX(2.3, 1),\n CNOT(0, 1),\n MEASURE(0, 0),\n MEASURE(1, 1),\n ])\n\n flipped_program = _get_flipped_protoquil_program(program)\n assert flipped_program.out().splitlines()[-6::] == [\n 'PRAGMA PRESERVE_BLOCK',\n 'RX(pi) 0',\n 'RX(pi) 1',\n 'PRAGMA END_PRESERVE_BLOCK',\n 'MEASURE 0 ro[0]',\n 'MEASURE 1 ro[1]',\n ]\n\n\ndef test_get_flipped_program_only_measure():\n program = Program([\n MEASURE(0, 0),\n MEASURE(1, 1),\n ])\n\n flipped_program = _get_flipped_protoquil_program(program)\n assert flipped_program.out().splitlines() == [\n 'DECLARE ro BIT[2]',\n 'PRAGMA PRESERVE_BLOCK',\n 'RX(pi) 0',\n 'RX(pi) 1',\n 'PRAGMA END_PRESERVE_BLOCK',\n 'MEASURE 0 ro[0]',\n 'MEASURE 1 ro[1]',\n ]\n\n\ndef test_device_stuff():\n topo = nx.from_edgelist([(0, 4), (0, 99)])\n qc = QuantumComputer(\n name='testy!',\n qam=None, # not necessary for this test\n device=NxDevice(topo),\n compiler=DummyCompiler()\n )\n assert nx.is_isomorphic(qc.qubit_topology(), topo)\n\n isa = qc.get_isa(twoq_type='CPHASE')\n assert sorted(isa.edges)[0].type == 'CPHASE'\n assert sorted(isa.edges)[0].targets == [0, 4]\n\n\ndef test_run(forest):\n device = NxDevice(nx.complete_graph(3))\n qc = QuantumComputer(\n name='testy!',\n qam=QVM(connection=forest, gate_noise=[0.01] * 3),\n device=device,\n 
compiler=DummyCompiler()\n )\n bitstrings = qc.run(\n Program(\n H(0),\n CNOT(0, 1),\n CNOT(1, 2),\n MEASURE(0, 0),\n MEASURE(1, 1),\n MEASURE(2, 2)).wrap_in_numshots_loop(1000)\n )\n\n assert bitstrings.shape == (1000, 3)\n parity = np.sum(bitstrings, axis=1) % 3\n assert 0 < np.mean(parity) < 0.15\n\n\ndef test_run_pyqvm_noiseless():\n device = NxDevice(nx.complete_graph(3))\n qc = QuantumComputer(\n name='testy!',\n qam=PyQVM(n_qubits=3),\n device=device,\n compiler=DummyCompiler()\n )\n bitstrings = qc.run(\n Program(\n H(0),\n CNOT(0, 1),\n CNOT(1, 2),\n MEASURE(0, 0),\n MEASURE(1, 1),\n MEASURE(2, 2)).wrap_in_numshots_loop(1000)\n )\n\n assert bitstrings.shape == (1000, 3)\n parity = np.sum(bitstrings, axis=1) % 3\n assert np.mean(parity) == 0\n\n\ndef test_run_pyqvm_noisy():\n device = NxDevice(nx.complete_graph(3))\n qc = QuantumComputer(\n name='testy!',\n qam=PyQVM(n_qubits=3, post_gate_noise_probabilities={'relaxation': 0.01}),\n device=device,\n compiler=DummyCompiler()\n )\n bitstrings = qc.run(\n Program(\n H(0),\n CNOT(0, 1),\n CNOT(1, 2),\n MEASURE(0, 0),\n MEASURE(1, 1),\n MEASURE(2, 2)).wrap_in_numshots_loop(1000)\n )\n\n assert bitstrings.shape == (1000, 3)\n parity = np.sum(bitstrings, axis=1) % 3\n assert 0 < np.mean(parity) < 0.15\n\n\ndef test_readout_symmetrization(forest):\n device = NxDevice(nx.complete_graph(3))\n noise_model = decoherence_noise_with_asymmetric_ro(gates=gates_in_isa(device.get_isa()))\n qc = QuantumComputer(\n name='testy!',\n qam=QVM(connection=forest, noise_model=noise_model),\n device=device,\n compiler=DummyCompiler()\n )\n\n prog = Program(I(0), X(1),\n MEASURE(0, 0),\n MEASURE(1, 1))\n prog.wrap_in_numshots_loop(1000)\n\n bs1 = qc.run(prog)\n avg0_us = np.mean(bs1[:, 0])\n avg1_us = 1 - np.mean(bs1[:, 1])\n diff_us = avg1_us - avg0_us\n assert diff_us > 0.03\n\n bs2 = qc.run_symmetrized_readout(prog, 1000)\n avg0_s = np.mean(bs2[:, 0])\n avg1_s = 1 - np.mean(bs2[:, 1])\n diff_s = avg1_s - avg0_s\n assert diff_s < 
0.05\n\n\ndef test_list_qc():\n qc_names = list_quantum_computers(qpus=False)\n # TODO: update with deployed qpus\n assert qc_names == ['9q-square-qvm', '9q-square-noisy-qvm']\n\n\ndef test_parse_qc_name():\n name, qvm_type, noisy = _parse_name('9q-generic', None, None)\n assert name == '9q-generic'\n assert qvm_type is None\n assert not noisy\n\n name, qvm_type, noisy = _parse_name('9q-generic-qvm', None, None)\n assert name == '9q-generic'\n assert qvm_type == 'qvm'\n assert not noisy\n\n name, qvm_type, noisy = _parse_name('9q-generic-noisy-qvm', None, None)\n assert name == '9q-generic'\n assert qvm_type == 'qvm'\n assert noisy\n\n\ndef test_parse_qc_flags():\n name, qvm_type, noisy = _parse_name('9q-generic', False, False)\n assert name == '9q-generic'\n assert qvm_type is None\n assert not noisy\n\n name, qvm_type, noisy = _parse_name('9q-generic', True, None)\n assert name == '9q-generic'\n assert qvm_type == 'qvm'\n assert not noisy\n\n name, qvm_type, noisy = _parse_name('9q-generic', True, True)\n assert name == '9q-generic'\n assert qvm_type == 'qvm'\n assert noisy\n\n\ndef test_parse_qc_redundant():\n name, qvm_type, noisy = _parse_name('9q-generic', False, False)\n assert name == '9q-generic'\n assert qvm_type is None\n assert not noisy\n\n name, qvm_type, noisy = _parse_name('9q-generic-qvm', True, False)\n assert name == '9q-generic'\n assert qvm_type == 'qvm'\n assert not noisy\n\n name, qvm_type, noisy = _parse_name('9q-generic-noisy-qvm', True, True)\n assert name == '9q-generic'\n assert qvm_type == 'qvm'\n assert noisy\n\n\ndef test_parse_qc_conflicting():\n with pytest.raises(ValueError) as e:\n name, qvm_type, noisy = _parse_name('9q-generic-qvm', False, False)\n\n assert e.match(r'.*but you have specified `as_qvm=False`')\n\n with pytest.raises(ValueError) as e:\n name, qvm_type, noisy = _parse_name('9q-generic-noisy-qvm', True, False)\n assert e.match(r'.*but you have specified `noisy=False`')\n\n\ndef test_parse_qc_strip():\n # Originally 
used `str.strip` to remove the suffixes. This is not correct!\n name, _, _ = _parse_name(\"mvq-qvm\", None, None)\n assert name == 'mvq'\n\n name, _, _ = _parse_name(\"mvq-noisy-qvm\", None, None)\n assert name == 'mvq'\n\n\ndef test_parse_qc_no_prefix():\n prefix, qvm_type, noisy = _parse_name('qvm', None, None)\n assert qvm_type == 'qvm'\n assert not noisy\n assert prefix == ''\n\n prefix, qvm_type, noisy = _parse_name('', True, None)\n assert qvm_type == 'qvm'\n assert not noisy\n assert prefix == ''\n\n\ndef test_parse_qc_no_prefix_2():\n prefix, qvm_type, noisy = _parse_name('noisy-qvm', None, None)\n assert qvm_type == 'qvm'\n assert noisy\n assert prefix == ''\n\n prefix, qvm_type, noisy = _parse_name('', True, True)\n assert qvm_type == 'qvm'\n assert noisy\n assert prefix == ''\n\n\ndef test_parse_qc_pyqvm():\n prefix, qvm_type, noisy = _parse_name('9q-generic-pyqvm', None, None)\n assert prefix == '9q-generic'\n assert qvm_type == 'pyqvm'\n assert not noisy\n\n\ndef test_qc(qvm, compiler):\n qc = get_qc('9q-square-noisy-qvm')\n assert isinstance(qc, QuantumComputer)\n assert isinstance(qc.qam, QVM)\n assert qc.qam.noise_model is not None\n assert qc.qubit_topology().number_of_nodes() == 9\n assert qc.qubit_topology().degree[0] == 2\n assert qc.qubit_topology().degree[4] == 4\n assert str(qc) == \"9q-square-noisy-qvm\"\n\n\ndef test_qc_run(qvm, compiler):\n qc = get_qc('9q-square-noisy-qvm')\n bs = qc.run_and_measure(Program(X(0)), trials=3)\n assert len(bs) == 9\n for q, bits in bs.items():\n assert bits.shape == (3,)\n\n\ndef test_nq_qvm_qc():\n for n_qubits in [2, 4, 7, 19]:\n qc = get_qc(f'{n_qubits}q-qvm')\n for q1, q2 in itertools.permutations(range(n_qubits), r=2):\n assert (q1, q2) in qc.qubit_topology().edges\n assert qc.name == f'{n_qubits}q-qvm'\n\n\ndef test_qc_noisy():\n qc = get_qc('5q', as_qvm=True, noisy=True)\n assert isinstance(qc, QuantumComputer)\n\n\ndef test_qc_compile():\n qc = get_qc('5q', as_qvm=True, noisy=True)\n qc.compiler = 
DummyCompiler()\n prog = Program()\n prog += H(0)\n prog1 = qc.compile(prog)\n assert prog1 == prog\n\n\ndef test_qc_error():\n # QVM is not a QPU\n with pytest.raises(ValueError):\n get_qc('9q-square-noisy-qvm', as_qvm=False)\n\n with pytest.raises(ValueError):\n get_qc('5q', as_qvm=False)\n\n\ndef test_run_and_measure(local_qvm_quilc):\n qc = get_qc(\"9q-generic-qvm\")\n prog = Program(I(8))\n trials = 11\n # note to devs: this is included as an example in the run_and_measure docstrings\n # so if you change it here ... change it there!\n with local_qvm(): # Redundant with test fixture.\n bitstrings = qc.run_and_measure(prog, trials)\n bitstring_array = np.vstack(bitstrings[q] for q in qc.qubits()).T\n assert bitstring_array.shape == (trials, len(qc.qubits()))\n\n\ndef test_run_symmetrized_readout_error(local_qvm_quilc):\n qc = get_qc(\"9q-generic-qvm\")\n trials = 11\n prog = Program(I(8))\n\n # Trials not even\n with pytest.raises(ValueError):\n bitstrings = qc.run_symmetrized_readout(prog, trials)\n\n\ndef test_qvm_compile_pickiness(forest):\n p = Program(X(0), MEASURE(0, 0))\n p.wrap_in_numshots_loop(1000)\n nq = PyQuilExecutableResponse(program=p.out(), attributes={'num_shots': 1000})\n\n # Ok, non-realistic\n qc = get_qc('9q-qvm')\n qc.run(p)\n\n # Also ok\n qc.run(nq)\n\n # Not ok\n qc = get_qc('9q-square-qvm')\n with pytest.raises(TypeError):\n qc.run(p)\n\n # Yot ok\n qc.run(nq)\n\n\ndef test_run_with_parameters(forest):\n device = NxDevice(nx.complete_graph(3))\n qc = QuantumComputer(\n name='testy!',\n qam=QVM(connection=forest),\n device=device,\n compiler=DummyCompiler()\n )\n bitstrings = qc.run(\n executable=Program(\n Declare(name='theta', memory_type='REAL'),\n Declare(name='ro', memory_type='BIT'),\n RX(MemoryReference('theta'), 0),\n MEASURE(0, MemoryReference('ro'))\n ).wrap_in_numshots_loop(1000),\n memory_map={'theta': [np.pi]}\n )\n\n assert bitstrings.shape == (1000, 1)\n assert all([bit == 1 for bit in bitstrings])\n\n\ndef 
test_reset(forest):\n device = NxDevice(nx.complete_graph(3))\n qc = QuantumComputer(\n name='testy!',\n qam=QVM(connection=forest),\n device=device,\n compiler=DummyCompiler()\n )\n p = Program(\n Declare(name='theta', memory_type='REAL'),\n Declare(name='ro', memory_type='BIT'),\n RX(MemoryReference('theta'), 0),\n MEASURE(0, MemoryReference('ro'))\n ).wrap_in_numshots_loop(1000)\n qc.run(\n executable=p,\n memory_map={'theta': [np.pi]}\n )\n\n aref = ParameterAref(name='theta', index=0)\n assert qc.qam._variables_shim[aref] == np.pi\n assert qc.qam._executable == p\n assert qc.qam._bitstrings.shape == (1000, 1)\n assert all([bit == 1 for bit in qc.qam._bitstrings])\n assert qc.qam.status == 'done'\n\n qc.reset()\n\n assert qc.qam._variables_shim == {}\n assert qc.qam._executable is None\n assert qc.qam._bitstrings is None\n assert qc.qam.status == 'connected'\n\n\ndef test_get_qvm_with_topology():\n topo = nx.from_edgelist([\n (5, 6),\n (6, 7),\n (10, 11),\n ])\n # Note to developers: perhaps make `get_qvm_with_topology` public in the future\n qc = _get_qvm_with_topology(name='test-qvm', topology=topo)\n assert len(qc.qubits()) == 5\n assert min(qc.qubits()) == 5\n\n\ndef test_get_qvm_with_topology_2(forest):\n topo = nx.from_edgelist([\n (5, 6),\n (6, 7),\n ])\n qc = _get_qvm_with_topology(name='test-qvm', topology=topo)\n results = qc.run_and_measure(Program(X(5)), trials=5)\n assert sorted(results.keys()) == [5, 6, 7]\n assert all(x == 1 for x in results[5])\n\n\ndef test_parse_mix_qvm_and_noisy_flag():\n # https://github.com/rigetti/pyquil/issues/764\n name, qvm_type, noisy = _parse_name('1q-qvm', as_qvm=None, noisy=True)\n assert noisy\n\n\ndef test_noisy(forest):\n # https://github.com/rigetti/pyquil/issues/764\n p = Program(X(0))\n qc = get_qc('1q-qvm', noisy=True)\n result = qc.run_and_measure(p, trials=10000)\n assert result[0].mean() < 1.0\n"
] | [
[
"numpy.mean",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lionelmessi6410/Panorama-Stitching | [
"97e862d80ef168ad9306be9df8b108c1b2a3207f"
] | [
"code/EvaluateAffineMatrix.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Aug 27 11:53:25 2017\n\n@author: HGY\n\"\"\"\n\n'''\n# EvaluateAffineMatrix.py\n# Run this script to test your ComputeAffineMatrix() function\n# using sample data. You do not need to change anything in this script.\n'''\n\nimport numpy as np\nfrom scipy.io import loadmat\nfrom ComputeAffineMatrix import ComputeAffineMatrix\n\n#%% Test Data (You should not change the data here)\nsrcPt = np.asarray([[0.5, 0.1], [0.4, 0.2], [0.8, 0.2]])\ndstPt = np.asarray([[0.3, -0.2], [-0.4, -0.9], [0.1, 0.1]])\n\n#%% Calls your implementation of ComputeAffineMatrix.m\nH = ComputeAffineMatrix(srcPt, dstPt)\n\n#%% Load data and check solution\nsolution = loadmat('../checkpoint/Affine_ref.mat')['solution']\nerror = np.sum(np.square(H-solution))\nprint('Difference from reference solution: ',str(error))\n\nif error < 1e-20:\n print('\\nAccepted!')\nelse:\n print('\\nThere is something wrong.')"
] | [
[
"numpy.asarray",
"numpy.square",
"scipy.io.loadmat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
tomo726/DiffAugment-stylegan_pikachu | [
"a09e54235fda1d1e17fe5d8f180509dcb3e61c06"
] | [
"generate_gif.py"
] | [
"import argparse\nimport os\nimport numpy as np\nfrom PIL import Image\n\nimport dnnlib\nfrom dnnlib import tflib\n\nfrom training import misc\n\n\ndef run(resume, output, num_rows, num_cols, resolution, num_phases, transition_frames, static_frames, seed):\n tflib.init_tf({'rnd.np_random_seed': seed})\n _, _, Gs = misc.load_pkl(resume)\n output_seq = []\n batch_size = num_rows * num_cols\n latent_size = Gs.input_shape[1]\n latents = [np.random.randn(batch_size, latent_size) for _ in range(num_phases)]\n \n Gs_kwargs = dnnlib.EasyDict()\n Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8, nchw_to_nhwc=True)\n Gs_kwargs.randomize_noise = False\n\n def to_image_grid(outputs):\n outputs = np.reshape(outputs, [num_rows, num_cols, *outputs.shape[1:]])\n outputs = np.concatenate(outputs, axis=1)\n outputs = np.concatenate(outputs, axis=1)\n return Image.fromarray(outputs).resize((resolution * num_cols, resolution * num_rows), Image.ANTIALIAS)\n \n for i in range(num_phases):\n dlatents0 = Gs.components.mapping.run(latents[i - 1], None)\n dlatents1 = Gs.components.mapping.run(latents[i], None)\n for j in range(transition_frames):\n dlatents = (dlatents0 * (transition_frames - j) + dlatents1 * j) / transition_frames\n output_seq.append(to_image_grid(Gs.components.synthesis.run(dlatents, **Gs_kwargs)))\n output_seq.extend([to_image_grid(Gs.components.synthesis.run(dlatents1, **Gs_kwargs))] * static_frames)\n if not output.endswith('.gif'):\n output += '.gif'\n output_seq[0].save(output, save_all=True, append_images=output_seq[1:], optimize=False, duration=50, loop=0)\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description='Generate GIF.',\n formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument('-r', '--resume', help='Resume checkpoint path', required=True)\n parser.add_argument('-o', '--output', help='Output file name', required=True)\n parser.add_argument('--num-rows', help='Number of rows', default=2, type=int)\n 
parser.add_argument('--num-cols', help='Number of columns', default=3, type=int)\n parser.add_argument('--resolution', help='Resolution of the output images', default=128, type=int)\n parser.add_argument('--num-phases', help='Number of phases', default=5, type=int)\n parser.add_argument('--transition-frames', help='Number of transition frames per phase', default=20, type=int)\n parser.add_argument('--static-frames', help='Number of static frames per phase', default=5, type=int)\n parser.add_argument('--seed', help='Random seed', default=1000, type=int)\n\n args = parser.parse_args()\n\n run(**vars(args))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.reshape",
"numpy.random.randn",
"numpy.concatenate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
croberson94/tensorflow | [
"99b0d28cffba6767dc10e7864265ddcc10f0007f"
] | [
"tensorflow/python/ops/ragged/ragged_factory_ops_test.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests that ragged tensors work with GPU, such as placement of int and string.\n\nTest using ragged tensors with map_fn and distributed dataset. Since GPU does\nnot support strings, ragged tensors containing string should always be placed\non CPU.\n\"\"\"\n\nfrom absl.testing import parameterized\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.ops import multi_device_iterator_ops\nfrom tensorflow.python.distribute import mirrored_strategy\nfrom tensorflow.python.eager import def_function\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import map_fn\nfrom tensorflow.python.ops.ragged import ragged_factory_ops\nfrom tensorflow.python.platform import test\n\n\ndef ragged_int64():\n return ragged_factory_ops.constant(\n [\n [3, 1, 4, 1],\n [],\n [5, 9, 2],\n [6],\n [],\n [3, 1, 4, 1],\n [3, 1],\n [2, 1, 4, 1],\n ],\n dtype=dtypes.int64,\n )\n\n\ndef ragged_str():\n return ragged_factory_ops.constant([\n ['3', '1', '4', '1'],\n [],\n ['5', '9', '2'],\n ['6'],\n [],\n ['3', '1', '4', '1'],\n ['3', '1'],\n ['2', '1', '4', '1'],\n ])\n\n\nclass RaggedFactoryOpsTest(test_util.TensorFlowTestCase,\n 
parameterized.TestCase):\n\n @parameterized.parameters(\n (ragged_int64,),\n (ragged_str,),\n )\n def testRaggedWithMapFn(self, ragged_factory):\n\n @def_function.function\n def map_fn_producer(inputs):\n return map_fn.map_fn_v2(lambda x: x, inputs)\n\n t = ragged_factory()\n result = self.evaluate(map_fn_producer(t))\n self.assertAllEqual(t.values, result.values)\n\n @parameterized.parameters(\n (ragged_int64,),\n (ragged_str,),\n )\n def testRaggedWithMultiDeviceIterator(self, ragged_factory):\n\n @def_function.function\n def dataset_producer(t):\n ragged_ds = dataset_ops.Dataset.from_tensor_slices(t).batch(2)\n it = multi_device_iterator_ops.MultiDeviceIterator(ragged_ds, ['GPU:0'])\n with ops.device_v2('GPU:0'):\n return it.get_next_as_optional()\n\n t = ragged_factory()\n if t.dtype == dtypes.string:\n self.skipTest('b/194439197: fix ragged tensor of string')\n result = dataset_producer(t)\n self.assertAllEqual(\n self.evaluate(t[0]), self.evaluate(result[0].get_value()[0]))\n\n @parameterized.parameters(\n (ragged_int64,),\n (ragged_str,),\n )\n def testRaggedWithDistributedDataset(self, ragged_factory):\n\n @def_function.function\n def distributed_dataset_producer(t):\n strategy = mirrored_strategy.MirroredStrategy(['GPU:0', 'GPU:1'])\n ragged_ds = dataset_ops.Dataset.from_tensor_slices(t).batch(2)\n dist_dataset = strategy.experimental_distribute_dataset(ragged_ds)\n ds = iter(dist_dataset)\n return strategy.experimental_local_results(next(ds))[0]\n\n t = ragged_factory()\n if t.dtype == dtypes.string:\n self.skipTest('b/194439197: fix ragged tensor of string')\n\n result = distributed_dataset_producer(t)\n self.assertAllEqual(self.evaluate(t[0]), self.evaluate(result[0]))\n\n\nif __name__ == '__main__':\n test.main()\n"
] | [
[
"tensorflow.python.framework.ops.device_v2",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.ops.ragged.ragged_factory_ops.constant",
"tensorflow.python.platform.test.main",
"tensorflow.python.data.ops.multi_device_iterator_ops.MultiDeviceIterator",
"tensorflow.python.distribute.mirrored_strategy.MirroredStrategy",
"tensorflow.python.ops.map_fn.map_fn_v2"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
inkydragon/taichi | [
"b5f80c2771b578c2a32b9a024a351aafc8ca7a0b"
] | [
"examples/mpm128.py"
] | [
"import taichi as ti\nimport numpy as np\n\nti.init(arch=ti.gpu) # Try to run on GPU\n\nquality = 1 # Use a larger value for higher-res simulations\nn_particles, n_grid = 9000 * quality ** 2, 128 * quality\ndx, inv_dx = 1 / n_grid, float(n_grid)\ndt = 1e-4 / quality\np_vol, p_rho = (dx * 0.5)**2, 1\np_mass = p_vol * p_rho\nE, nu = 5e3, 0.2 # Young's modulus and Poisson's ratio\nmu_0, lambda_0 = E / (2 * (1 + nu)), E * nu / ((1+nu) * (1 - 2 * nu)) # Lame parameters\n\nx = ti.Vector(2, dt=ti.f32, shape=n_particles) # position\nv = ti.Vector(2, dt=ti.f32, shape=n_particles) # velocity\nC = ti.Matrix(2, 2, dt=ti.f32, shape=n_particles) # affine velocity field\nF = ti.Matrix(2, 2, dt=ti.f32, shape=n_particles) # deformation gradient\nmaterial = ti.var(dt=ti.i32, shape=n_particles) # material id\nJp = ti.var(dt=ti.f32, shape=n_particles) # plastic deformation\ngrid_v = ti.Vector(2, dt=ti.f32, shape=(n_grid, n_grid)) # grid node momentum/velocity\ngrid_m = ti.var(dt=ti.f32, shape=(n_grid, n_grid)) # grid node mass\ngravity = ti.Vector(2, dt=ti.f32, shape=())\nattractor_strength = ti.var(dt=ti.f32, shape=())\nattractor_pos = ti.Vector(2, dt=ti.f32, shape=())\n\[email protected]\ndef substep():\n for i, j in grid_m:\n grid_v[i, j] = [0, 0]\n grid_m[i, j] = 0\n for p in x: # Particle state update and scatter to grid (P2G)\n base = (x[p] * inv_dx - 0.5).cast(int)\n fx = x[p] * inv_dx - base.cast(float)\n # Quadratic kernels [http://mpm.graphics Eqn. 
123, with x=fx, fx-1,fx-2]\n w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1) ** 2, 0.5 * (fx - 0.5) ** 2]\n F[p] = (ti.Matrix.identity(ti.f32, 2) + dt * C[p]) @ F[p] # deformation gradient update\n h = max(0.1, min(5, ti.exp(10 * (1.0 - Jp[p])))) # Hardening coefficient: snow gets harder when compressed\n if material[p] == 1: # jelly, make it softer\n h = 0.3\n mu, la = mu_0 * h, lambda_0 * h\n if material[p] == 0: # liquid\n mu = 0.0\n U, sig, V = ti.svd(F[p])\n J = 1.0\n for d in ti.static(range(2)):\n new_sig = sig[d, d]\n if material[p] == 2: # Snow\n new_sig = min(max(sig[d, d], 1 - 2.5e-2), 1 + 4.5e-3) # Plasticity\n Jp[p] *= sig[d, d] / new_sig\n sig[d, d] = new_sig\n J *= new_sig\n if material[p] == 0: # Reset deformation gradient to avoid numerical instability\n F[p] = ti.Matrix.identity(ti.f32, 2) * ti.sqrt(J)\n elif material[p] == 2:\n F[p] = U @ sig @ V.T() # Reconstruct elastic deformation gradient after plasticity\n stress = 2 * mu * (F[p] - U @ V.T()) @ F[p].T() + ti.Matrix.identity(ti.f32, 2) * la * J * (J - 1)\n stress = (-dt * p_vol * 4 * inv_dx * inv_dx) * stress\n affine = stress + p_mass * C[p]\n for i, j in ti.static(ti.ndrange(3, 3)): # Loop over 3x3 grid node neighborhood\n offset = ti.Vector([i, j])\n dpos = (offset.cast(float) - fx) * dx\n weight = w[i][0] * w[j][1]\n grid_v[base + offset] += weight * (p_mass * v[p] + affine @ dpos)\n grid_m[base + offset] += weight * p_mass\n for i, j in grid_m:\n if grid_m[i, j] > 0: # No need for epsilon here\n grid_v[i, j] = (1 / grid_m[i, j]) * grid_v[i, j] # Momentum to velocity\n grid_v[i, j] += dt * gravity[None] * 30 # gravity\n dist = attractor_pos[None] - dx * ti.Vector([i, j])\n grid_v[i, j] += dist / (0.01 + dist.norm()) * attractor_strength[None] * dt * 100\n if i < 3 and grid_v[i, j][0] < 0: grid_v[i, j][0] = 0 # Boundary conditions\n if i > n_grid - 3 and grid_v[i, j][0] > 0: grid_v[i, j][0] = 0\n if j < 3 and grid_v[i, j][1] < 0: grid_v[i, j][1] = 0\n if j > n_grid - 3 and grid_v[i, j][1] > 0: 
grid_v[i, j][1] = 0\n for p in x: # grid to particle (G2P)\n base = (x[p] * inv_dx - 0.5).cast(int)\n fx = x[p] * inv_dx - base.cast(float)\n w = [0.5 * (1.5 - fx) ** 2, 0.75 - (fx - 1.0) ** 2, 0.5 * (fx - 0.5) ** 2]\n new_v = ti.Vector.zero(ti.f32, 2)\n new_C = ti.Matrix.zero(ti.f32, 2, 2)\n for i, j in ti.static(ti.ndrange(3, 3)): # loop over 3x3 grid node neighborhood\n dpos = ti.Vector([i, j]).cast(float) - fx\n g_v = grid_v[base + ti.Vector([i, j])]\n weight = w[i][0] * w[j][1]\n new_v += weight * g_v\n new_C += 4 * inv_dx * weight * g_v.outer_product(dpos)\n v[p], C[p] = new_v, new_C\n x[p] += dt * v[p] # advection\n\[email protected]\ndef reset():\n group_size = n_particles // 3\n for i in range(n_particles):\n x[i] = [ti.random() * 0.2 + 0.3 + 0.10 * (i // group_size), ti.random() * 0.2 + 0.05 + 0.32 * (i // group_size)]\n material[i] = i // group_size # 0: fluid 1: jelly 2: snow\n v[i] = [0, 0]\n F[i] = ti.Matrix([[1, 0], [0, 1]])\n Jp[i] = 1\n C[i] = ti.Matrix.zero(ti.f32, 2, 2)\n \nprint(\"[Hint] Use WSAD/arrow keys to control gravity. Use left/right mouse bottons to attract/repel. 
Press R to reset.\")\ngui = ti.GUI(\"Taichi MLS-MPM-128\", res=512, background_color=0x112F41)\nreset()\ngravity[None] = [0, -1]\n\nfor frame in range(20000):\n if gui.get_event(ti.GUI.PRESS):\n if gui.event.key == 'r': reset()\n elif gui.event.key in [ti.GUI.ESCAPE, ti.GUI.EXIT]: break\n if gui.event is not None: gravity[None] = [0, 0] # if had any event\n if gui.is_pressed(ti.GUI.LEFT, 'a'): gravity[None][0] = -1\n if gui.is_pressed(ti.GUI.RIGHT, 'd'): gravity[None][0] = 1\n if gui.is_pressed(ti.GUI.UP, 'w'): gravity[None][1] = 1\n if gui.is_pressed(ti.GUI.DOWN, 's'): gravity[None][1] = -1\n mouse = gui.get_cursor_pos()\n gui.circle((mouse[0], mouse[1]), color=0x336699, radius=15)\n attractor_pos[None] = [mouse[0], mouse[1]]\n attractor_strength[None] = 0\n if gui.is_pressed(ti.GUI.LMB):\n attractor_strength[None] = 1\n if gui.is_pressed(ti.GUI.RMB):\n attractor_strength[None] = -1\n for s in range(int(2e-3 // dt)):\n substep()\n colors = np.array([0x068587, 0xED553B, 0xEEEEF0], dtype=np.uint32)\n gui.circles(x.to_numpy(), radius=1.5, color=colors[material.to_numpy()])\n gui.show() # Change to gui.show(f'{frame:06d}.png') to write images to disk\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Shamli28/Sales-Data-Analysis | [
"bf5e45b8e209fdfca55ec19787672cdb2add503e"
] | [
"Sales Data Analysis.py"
] | [
"#!/usr/bin/env python\n# coding: utf-8\n\n# # Preparing Data for Analysis\n\n# - Perform Analysis & basically derive insights from the data\n\n# ## Data Preparation\n\n# In[2]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os # whenever we have to deal with like,say,whenever we have to create some folder/directory/remove the directory/modify\n\n\n# - Using os,you can fetch all the files available in particular location\n\n# In[4]:\n\n\nfiles=[file for file in os.listdir('/home/shamli/Downloads/Datasets/Sales_Data/')] #append each n every file in my list\nfor file in files:\n print(file)\n\n\n# In[5]:\n\n\npath='/home/shamli/Downloads/Datasets/Sales_Data/' # deine a variable(path with location)\nall_data=pd.DataFrame() #Declared blank dataframe & concatenate all the data in all_data\n\nfor file in files: #iterate over my list(file)\n current_df=pd.read_csv(path+\"/\"+file) #path+threshold(say where my file/wht is file)\n all_data=pd.concat([all_data,current_df])#concatenate dataframe \n\nall_data.shape\n\n\n# In[6]:\n\n\nall_data.to_csv('/home/shamli/Downloads/Datasets/Sales_Data/all_data.csv', index=False) #convert this data into csv,set the index-False dont want the index.all_data file has created\n\n\n# In[7]:\n\n\nall_data.head()\n\n\n# In[8]:\n\n\n# checking missing values\nall_data.isnull().sum()\n\n\n# In[9]:\n\n\n# droping missing values\nall_data=all_data.dropna(how='all')\nall_data.shape\n\n\n# # Analyzing Monthly Sales\n\n# ## What is best month of sales\n\n# In[10]:\n\n\n'12/30/19 00:01'.split('/')[0] #separate it on basis of operator\n\n\n# In[12]:\n\n\ndef month(x):\n return x.split('/')[0]\n\n\n# In[13]:\n\n\nall_data['month']=all_data['Order Date'].apply(month) #apply function using apply \n\n\n# In[14]:\n\n\nall_data.head()\n\n\n# In[15]:\n\n\n# datatype of entire dataframe\nall_data.dtypes\n\n\n# In[16]:\n\n\n# change the datatypes of three columns\nall_data['month']=all_data['month'].astype(int)\n\n\n# 
In[17]:\n\n\n# check the unique value\nall_data['month'].unique()\n\n\n# In[18]:\n\n\nfilter=all_data['month']=='Order Date'\nall_data=all_data[~filter] #applied negation for this filter\nall_data.head()\n\n\n# - Now we can easily convert datadrame to int\n\n# In[19]:\n\n\nall_data['month']=all_data['month'].astype(int)\n\n\n# In[20]:\n\n\nall_data.dtypes\n\n\n# In[21]:\n\n\nall_data['Quantity Ordered']=all_data['Quantity Ordered'].astype(int)\nall_data['Price Each']=all_data['Price Each'].astype(float)\n\n\n# In[22]:\n\n\nall_data.dtypes\n\n\n# In[23]:\n\n\n# add fifth column \nall_data['Sales']=all_data['Quantity Ordered']*all_data['Price Each']\n\n\n# In[24]:\n\n\nall_data.head()\n\n\n# In[25]:\n\n\nall_data.groupby('month')['Sales'].sum()\n\n\n# In[26]:\n\n\nmonths=range(1,13)\nplt.bar(months, all_data.groupby('month')['Sales'].sum())\nplt.xticks(months) # setup xaxis\nplt.xlabel('month')\nplt.ylabel('Sales in USD')\n\n\n# # Analyzing Maximum Order & Hour Analysis\n\n# ## Which city has max order\n\n# In[27]:\n\n\nall_data.head()\n\n\n# In[28]:\n\n\n# fetch city from purchase address and add column city \n'136 Church St, New York City, NY 10001'.split(',')[1] #access first index from list\n\n\n# In[29]:\n\n\ndef city(x):\n return x.split(',')[1]\n\n\n# In[30]:\n\n\nall_data['City']=all_data['Purchase Address'].apply(city)\n\n\n# In[31]:\n\n\nall_data.head()\n\n\n# In[32]:\n\n\nall_data.groupby('City')['City'].count().plot.bar()\n\n\n# - Max value is for San Franchisco\n\n# In[33]:\n\n\nall_data['Order Date'].dtype\n\n\n# In[34]:\n\n\n# Convert column to daytime\nall_data['Hour']=pd.to_datetime(all_data['Order Date']).dt.hour\n\n\n# In[35]:\n\n\nall_data.head()\n\n\n# In[43]:\n\n\nkeys=[] # Define list\nhour=[] # define hourlist\nfor key,hour_df in all_data.groupby('Hour'):\n keys.append(key)\n hour.append(len(hour_df))\n\n\n# In[44]:\n\n\nkeys\n\n\n# In[45]:\n\n\nhour\n\n\n# In[47]:\n\n\nplt.grid()\nplt.plot(keys,hour)\n\n\n# # Analyzing Most Sold Products\n\n# 
## What product sold the most & why?\n\n# In[49]:\n\n\n# need to groupby on product bcz have to define what products sold the most\nall_data.groupby('Product')['Quantity Ordered'].sum().plot(kind='bar')\n\n\n# In[50]:\n\n\n# analyze why this product has a max set selective \nall_data.groupby('Product')['Price Each'].mean() # mean price of each n every product\n\n\n# In[51]:\n\n\n# Visualize all this stuff\nproducts=all_data.groupby('Product')['Quantity Ordered'].sum().index\nquantity=all_data.groupby('Product')['Quantity Ordered'].sum()\nprice=all_data.groupby('Product')['Price Each'].mean()\n\n\n# In[55]:\n\n\nfig,ax1=plt.subplots()\nax2=ax1.twinx() #create a twin axis sharing the xaxis\nax1.bar(products,quantity,color='g')\nax2.plot(products,price)\nax1.set_xticklabels(products,rotation='vertical',size=8) # deal with xaxis\n\n\n# - top selling products seem to have a correlation with the price of the product.\n# \n# - Cheaper the product, the higher the quantity ordered as well as vice versa.\n\n# ## What products are most often sold together?\n\n# In[56]:\n\n\nall_data.head()\n\n\n# - Here,you have to keep that orders that have same order ids because that are basically that products that are mostly sold together.\n\n# In[60]:\n\n\n# Keep all the order IDs that has some duplicate values\ndf=all_data['Order ID'].duplicated(keep=False)\ndf2=all_data[df] # pass the filter in dataframe\ndf2.head()\n\n\n# - Right Now,it means you have to do some of the transformation on this dataframe to reach to ur problem statement.\n\n# In[64]:\n\n\n# perform join operation\ndf2['Grouped']=df2.groupby('Order ID')['Product'].transform(lambda x:','.join(x))\n\n\n# In[65]:\n\n\ndf2.head()\n\n\n# In[66]:\n\n\ndf2.drop_duplicates(subset=['Order ID'])\ndf2.head()\n\n\n# In[68]:\n\n\ndf2['Grouped'].value_counts()[0:5].plot.pie()\n\n\n# In[ ]:\n\n\n\n\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.to_datetime",
"matplotlib.pyplot.subplots",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
muxevola/imagepy | [
"d6d8cbf214f47a4a545a0d283ae393a6932c4c0f"
] | [
"imagepy/menus/Table/Statistic/statistic_plgs.py"
] | [
"from imagepy.core.engine import Table\nimport pandas as pd\nfrom imagepy import IPy\n\nclass Statistic(Table):\n\ttitle = 'Table Statistic'\n\tnote = ['snap', 'only_num', 'row_msk', 'col_msk']\n\n\tpara = {'axis':'Column', 'sum':True, 'mean':True,'max':False, \n\t\t'min':False,'var':False,'std':False,'skew':False,'kurt':False}\n\t\t\n\tview = [(list, 'axis', ['Row', 'Column'], str, 'axis', ''),\n\t\t\t(bool, 'sum', 'sum'),\n\t\t\t(bool, 'mean', 'mean'),\n\t\t\t(bool, 'max', 'max'),\n\t\t\t(bool, 'min', 'min'),\n\t\t\t(bool, 'var', 'var'),\n\t\t\t(bool, 'std', 'std'),\n\t\t\t(bool, 'skew', 'skew'),\n\t\t\t(bool, 'kurt', 'kurt')]\n\n\tdef run(self, tps, data, snap, para=None):\n\t\trst, axis = {}, (0,1)[para['axis']=='Row']\n\t\tif para['sum']:rst['sum'] = snap.sum(axis=axis)\n\t\tif para['mean']:rst['mean'] = snap.mean(axis=axis)\n\t\tif para['max']:rst['max'] = snap.max(axis=axis)\n\t\tif para['min']:rst['min'] = snap.min(axis=axis)\n\t\tif para['var']:rst['var'] = snap.var(axis=axis)\n\t\tif para['std']:rst['std'] = snap.std(axis=axis)\n\t\tif para['skew']:rst['skew'] = snap.skew(axis=axis)\n\t\tif para['kurt']:rst['kurt'] = snap.kurt(axis=axis)\n\t\tIPy.show_table(pd.DataFrame(rst), tps.title+'-statistic')\n\nclass GroupStatistic(Table):\n\ttitle = 'Group Statistic'\n\n\tpara = {'major':None, 'minor':None, 'sum':True, 'mean':True,'max':False, \n\t\t'min':False,'var':False,'std':False,'skew':False,'kurt':False, 'cn':[]}\n\t\t\n\tview = [('fields', 'cn', 'field to statistic'),\n\t\t\t('field', 'major', 'group by', 'major'),\n\t\t\t('field', 'minor', 'group by', 'key'),\n\t\t\t\n\t\t\t(bool, 'sum', 'sum'),\n\t\t\t(bool, 'mean', 'mean'),\n\t\t\t(bool, 'max', 'max'),\n\t\t\t(bool, 'min', 'min'),\n\t\t\t(bool, 'var', 'var'),\n\t\t\t(bool, 'std', 'std'),\n\t\t\t(bool, 'skew', 'skew')]\n\n\tdef run(self, tps, data, snap, para=None):\n\t\tby = [i for i in [para['major'], para['minor']] if i!='None']\n\t\tgp = data.groupby(by)[para['cn']]\n\n\t\trst = []\n\t\tdef post(a, 
fix): \n\t\t\ta.columns = ['%s-%s'%(i,fix) for i in a.columns]\n\t\t\treturn a\n\n\t\tif para['sum']:rst.append(post(gp.sum(), 'sum'))\n\t\tif para['mean']:rst.append(post(gp.mean(), 'mean'))\n\t\tif para['max']:rst.append(post(gp.max(), 'max'))\n\t\tif para['min']:rst.append(post(gp.min(), 'min'))\n\t\tif para['var']:rst.append(post(gp.var(), 'var'))\n\t\tif para['std']:rst.append(post(gp.std(), 'std'))\n\t\tif para['skew']:rst.append(post(gp.skew(), 'skew'))\n\n\t\tIPy.show_table(pd.concat(rst, axis=1), tps.title+'-statistic')\n\nplgs = [Statistic, GroupStatistic]"
] | [
[
"pandas.concat",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
anonymous0120/attentional_pervasive_fabricate_vanish_attack | [
"fa1d6e108c5e2521f801b9d53a3194e781942e61"
] | [
"apfv/attacks/afv.py"
] | [
"# Copyright 2021 Yantao Lu\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\" Script for attentional fabricate-vanish attack.\n\"\"\"\nimport copy\nimport numpy as np\nfrom torch.autograd import Variable\nimport torch\nimport scipy.stats as st\nfrom scipy import ndimage\nimport warnings\n\nimport pdb\n \n\nclass AFV(object):\n \"\"\" Class of attentional fabricate-vanish attack.\n \"\"\"\n def __init__(self, \n victim_model: torch.nn.Module,\n attention_model: torch.nn.Module, \n attention_optimizer: torch.optim.Optimizer=None,\n decay_factor: float=1.0, prob: float=0.5,\n epsilon: float=16./255, steps: int=100,\n step_size: float=2./255, image_resize: int=330,\n dr_weight: float=100.0, ti_smoothing: bool=False,\n ti_kernel_radius: tuple=(15, 3), random_start: bool=False,\n attack_config: dict=None):\n \"\"\"\n Related Paper link: https://arxiv.org/pdf/1803.06978.pdf\n \"\"\"\n self._victim_model = copy.deepcopy(victim_model)\n self._attention_model = attention_model\n self._attention_optimizer = attention_optimizer\n self._loss_fn = torch.nn.CrossEntropyLoss().cuda()\n if attack_config == None:\n self._epsilon = epsilon\n self._steps = steps\n self._step_size = step_size\n self._rand = random_start\n self._decay_factor = decay_factor\n self._prob = prob\n self._image_resize = image_resize\n self._dr_weight = dr_weight\n self._ti_smoothing = ti_smoothing\n self._ti_kernel_radius = ti_kernel_radius\n else:\n warnings.warn(\"Over-writting AFV pameters by 
|attack_config|.\")\n self._epsilon = attack_config[\"epsilon\"]\n self._steps = attack_config[\"steps\"]\n self._step_size = attack_config[\"step_size\"]\n self._rand = attack_config[\"random_start\"]\n self._decay_factor = attack_config[\"decay_factor\"]\n self._prob = attack_config[\"prob\"]\n self._image_resize = attack_config[\"image_resize\"]\n self._dr_weight = attack_config[\"dr_weight\"]\n self._ti_smoothing = attack_config[\"ti_smoothing\"]\n self._ti_kernel_radius = attack_config[\"ti_kernel_radius\"]\n\n if self._ti_smoothing:\n assert len(self._ti_kernel_radius) == 2\n kernel = self.gkern(\n self._ti_kernel_radius[0],\n self._ti_kernel_radius[1]).astype(np.float32)\n self._stack_kernel = np.stack([kernel, kernel, kernel])\n\n def __call__(self,\n X_nat: torch.Tensor,\n y: torch.Tensor,\n internal: tuple=()) -> torch.Tensor:\n \"\"\"\n Given examples (X_nat, y), returns adversarial\n examples within epsilon of X_nat in l_infinity norm.\n \"\"\"\n X_nat_np = X_nat.detach().cpu().numpy()\n for p in self._victim_model.parameters():\n p.requires_grad = False\n \n self._victim_model.eval()\n if self._rand:\n X = X_nat_np + np.random.uniform(-self._epsilon,\n self._epsilon,\n X_nat_np.shape).astype('float32')\n else:\n X = np.copy(X_nat_np)\n \n momentum = 0.0\n attention_map = None\n attention_map_first = None\n attention_map_last = None\n for step_idx in range(self._steps):\n X_var = Variable(torch.from_numpy(X).cuda(),\n requires_grad=True, volatile=False)\n y_var = y.cuda()\n\n # Calculate attention map.\n if self._attention_optimizer == None:\n self._attention_model.eval()\n else:\n self._attention_model.train()\n\n attention_map = self._attention_model(X_var)\n if step_idx == 0:\n attention_map_first = attention_map.clone().detach()\n elif step_idx == self._steps - 1:\n attention_map_last = attention_map.clone().detach()\n\n # Foreground processing\n X_fg_var = X_var * attention_map_first\n layers, _ = self._victim_model(X_fg_var, internal=internal)\n # 
Background processing\n X_bg_var = X_var * (1. - attention_map_first)\n rnd = np.random.rand()\n if rnd < self._prob:\n transformer = _tranform_resize_padding(\n X.shape[-2], X.shape[-1], self._image_resize,\n resize_back=True)\n X_trans_var = transformer(X_bg_var)\n else:\n X_trans_var = X_bg_var\n _, scores = self._victim_model(X_trans_var, internal=internal)\n\n # Calculate gradients for dispersion loss.\n loss_dr = 0.0\n for layer_idx, target_layer in enumerate(layers):\n temp_loss_dr = target_layer.var()\n loss_dr += temp_loss_dr\n # Calculate gradients for logit loss(TIDIM).\n loss_logit = -1 * self._loss_fn(scores, y_var)\n # Combine loss\n loss = loss_logit + self._dr_weight * loss_dr\n\n self._victim_model.zero_grad()\n if self._attention_optimizer != None:\n self._attention_optimizer.zero_grad()\n loss.backward()\n # train attention model if optimizer is not None.\n if self._attention_optimizer != None:\n self._attention_model.train()\n self._attention_optimizer.step()\n\n # Update adversarial image\n grad = X_var.grad.data.cpu().numpy()\n # Apply translation-invariant if |_ti_smoothing| flag is turned on.\n if self._ti_smoothing:\n grad = self.depthwise_conv2d(grad, self._stack_kernel)\n\n X_var.grad.zero_()\n velocity = grad / np.mean(np.absolute(grad), axis=(1, 2, 3))\n momentum = self._decay_factor * momentum + velocity\n\n X -= self._step_size * np.sign(momentum)\n X = np.clip(X, X_nat_np - self._epsilon, X_nat_np + self._epsilon)\n X = np.clip(X, 0, 1) # ensure valid pixel range\n return torch.from_numpy(X), attention_map_first, attention_map_last\n\n @staticmethod\n def gkern(kernlen, nsig):\n \"\"\"Returns a 2D Gaussian kernel array.\"\"\"\n x = np.linspace(-nsig, nsig, kernlen)\n kern1d = st.norm.pdf(x)\n kernel_raw = np.outer(kern1d, kern1d)\n kernel = kernel_raw / kernel_raw.sum()\n return kernel\n\n @staticmethod\n def depthwise_conv2d(in1, stack_kernel):\n ret = []\n for temp_in in in1:\n # numpy convolve operates differently to CNN conv, \n 
# however they are the same when keernel is symetric.\n temp_out = ndimage.convolve(temp_in, stack_kernel, mode='constant')\n ret.append(temp_out)\n ret = np.array(ret)\n return ret\n\n\nclass _tranform_resize_padding(torch.nn.Module):\n def __init__(self, image_h, image_w, image_resize, resize_back=False):\n super(_tranform_resize_padding, self).__init__()\n self.shape = [image_h, image_w]\n self._image_resize = image_resize\n self.resize_back = resize_back\n\n def __call__(self, input_tensor):\n assert self.shape[0] < self._image_resize \\\n and self.shape[1] < self._image_resize\n rnd = np.random.randint(self.shape[1], self._image_resize)\n input_upsample = torch.nn.functional.interpolate(\n input_tensor, size=(rnd, rnd), mode='nearest')\n h_rem = self._image_resize - rnd\n w_rem = self._image_resize - rnd\n pad_top = np.random.randint(0, h_rem)\n pad_bottom = h_rem - pad_top\n pad_left = np.random.randint(0, w_rem)\n pad_right = w_rem - pad_left\n padder = torch.nn.ConstantPad2d(\n (pad_left, pad_right, pad_top, pad_bottom), 0.0)\n input_padded = padder(input_upsample)\n if self.resize_back:\n input_padded_resize = torch.nn.functional.interpolate(\n input_padded, size=self.shape, mode='nearest')\n return input_padded_resize\n else:\n return input_padded"
] | [
[
"torch.nn.CrossEntropyLoss",
"numpy.absolute",
"scipy.stats.norm.pdf",
"numpy.linspace",
"torch.nn.ConstantPad2d",
"numpy.clip",
"torch.from_numpy",
"numpy.stack",
"scipy.ndimage.convolve",
"numpy.sign",
"numpy.random.uniform",
"numpy.copy",
"numpy.random.rand",
"torch.nn.functional.interpolate",
"numpy.outer",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
sandeepnair2812/Weibull-Time-To-Event-Recurrent-Neural-Network | [
"162f5c17f21db79a316d563b60835d178142fd69"
] | [
"python/tests/test_tensorflow.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport pytest\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom wtte.objectives.tensorflow import loglik_continuous, loglik_discrete\nfrom wtte.data_generators import generate_weibull\n\n# SANITY CHECK: Use pure Weibull data censored at C(ensoring point).\n# Should converge to the generating A(alpha) and B(eta) for each timestep\n\nn_samples = 1000\nn_features = 1\nreal_a = 3.\nreal_b = 2.\ncensoring_point = real_a * 2\n\n\ndef tf_loglik_runner(loglik_fun, discrete_time):\n sess = tf.Session()\n np.random.seed(1)\n tf.set_random_seed(1)\n\n y_ = tf.placeholder(tf.float32, shape=(None, 1))\n u_ = tf.placeholder(tf.float32, shape=(None, 1))\n\n a = tf.exp(tf.Variable(tf.ones([1]), name='a_weight'))\n b = tf.exp(tf.Variable(tf.ones([1]), name='b_weight'))\n\n # testing part:\n loglik = loglik_fun(a, b, y_, u_)\n\n loss = -tf.reduce_mean(loglik)\n\n optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.005)\n\n train_step = optimizer.minimize(loss)\n\n # Launch the graph in a session.\n np.random.seed(1)\n\n # Initializes global variables in the graph.\n sess.run(tf.global_variables_initializer())\n\n tte_actual, tte_censored, u_train = generate_weibull(\n A=real_a,\n B=real_b,\n C=censoring_point, # <np.inf -> impose censoring\n shape=[n_samples, n_features],\n discrete_time=discrete_time)\n\n # Fit\n for step in range(1000):\n loss_val, _, a_val, b_val = sess.run([loss, train_step, a, b], feed_dict={\n y_: tte_censored, u_: u_train})\n\n if step % 100 == 0:\n print(step, loss_val, a_val, b_val)\n\n print(np.abs(real_a - a_val), np.abs(real_b - b_val))\n assert np.abs(real_a - a_val) < 0.05, 'alpha not converged'\n assert np.abs(real_b - b_val) < 0.05, 'beta not converged'\n sess.close()\n tf.reset_default_graph()\n\n\ndef test_loglik_continuous():\n tf_loglik_runner(loglik_continuous, discrete_time=False)\n\n\ndef test_loglik_discrete():\n 
tf_loglik_runner(loglik_discrete, discrete_time=True)\n"
] | [
[
"numpy.abs",
"numpy.random.seed",
"tensorflow.reduce_mean",
"tensorflow.placeholder",
"tensorflow.ones",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.reset_default_graph",
"tensorflow.Session",
"tensorflow.set_random_seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
ying-wen/pmln | [
"76d82dd620504ac00035d9d0dc9d752cd53518d4"
] | [
"ctr_fm.py"
] | [
"import numpy as np\nimport pandas as pd\nfrom time import time\nfrom sklearn import metrics\nfrom sample_encoding_keras import *\nfrom utility import Options, ctr_batch_generator, read_feat_index, eval_auc\nimport tensorflow as tf\n\nopts = Options()\nprint('Loading data...')\n\n#############################\n# Settings for ipinyou\n#############################\nopts.sequence_length = 22\nopts.interaction_times = 1\nopts.total_training_sample = 127127\n# change here for different camp\n# 1458 2261 2997 3386 3476 2259 2821 3358 3427 all\nBASE_PATH = './data/make-ipinyou-data/all/'\nopts.field_indices_path = BASE_PATH + 'field_indices.txt'\nopts.train_path = BASE_PATH + 'train.yzx.10.txt'\nopts.test_path = BASE_PATH + 'test.yzx.txt'\nopts.featindex = BASE_PATH + 'featindex.txt'\nopts.model_name = 'ipinyou_fm'\n\n### Paramerters for tuning\nopts.batch_size = 256\nopts.embedding_size = 8\n#opts.learning_rate = 0.1\n\n\n#############################\n# Settings for criteo\n#############################\n'''\nopts.sequence_length = 35\nopts.total_training_sample = 39799999\nBASE_PATH = './data/criteo/'\nopts.field_indices_path = BASE_PATH + 'field_indices.txt'\nopts.train_path = BASE_PATH + 'train.index.txt'\nopts.test_path = BASE_PATH + 'test.index.txt'\nopts.featindex = BASE_PATH + 'featindex.txt'\nopts.model_name = 'criteo_fm'\n\n### Paramerters for tuning\nopts.batch_size = 2048\nopts.embedding_size = 8\n#opts.learning_rate = 0.1\n'''\n\ngpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)\nsess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\nK.set_session(sess)\nid2cate, cate2id, opts.vocabulary_size = read_feat_index(opts)\n\n\n#############################\n# Model\n#############################\nfrom keras.models import Sequential, Model\nfrom keras.layers import Input, Dense, Embedding, Dropout, Activation, Flatten, Merge, merge\nfrom keras.layers import Convolution1D, MaxPooling1D\n\nmodel = 
Sequential()\nmodel.add(FM(opts.vocabulary_size,opts.embedding_size,input_length=opts.sequence_length,dropout=0.1, name='prob'))\nmodel.compile(optimizer='nadam',\n loss='binary_crossentropy',\n metrics=['accuracy'])\nprint(model.summary())\nopts.current_epoch = 1\n\n\n#############################\n# Model Training\n#############################\nfor i in range(opts.epochs_to_train):\n print(\"Current Global Epoch: \", opts.current_epoch)\n training_batch_generator = ctr_batch_generator(opts)\n history = model.fit_generator(training_batch_generator, opts.total_training_sample, 1)\n if i % 1 == 0:\n opts.batch_size = 4096\n model.save('model_'+opts.model_name+'_' + str(opts.current_epoch) + '.h5')\n eval_auc(model, opts)\n opts.current_epoch += 1\n"
] | [
[
"tensorflow.ConfigProto",
"tensorflow.GPUOptions"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yuki-inaho/zense_grpc_rgbd_ir | [
"76739df6b1a402177d2cf47c791faa252688d5b3"
] | [
"scripts/fitter/cameraparam.py"
] | [
"import numpy as np\nimport math\nimport toml\n\n\ndef get_camera_param(toml_path):\n dict_toml = toml.load(open(toml_path))\n n_camera = int(dict_toml['General']['n_camera'])\n camera_param = {}\n for i_cam in range(n_camera):\n key = 'Camera{}'.format(i_cam)\n camera_name = dict_toml[key]['camera_name']\n height = int(dict_toml[key]['height'])\n width = int(dict_toml[key]['width'])\n fx = float(dict_toml[key]['fx'])\n fy = float(dict_toml[key]['fy'])\n cx = float(dict_toml[key]['cx'])\n cy = float(dict_toml[key]['cy'])\n roll = float(dict_toml[key]['rot_angle_roll'])\n pitch = float(dict_toml[key]['rot_angle_pitch'])\n yaw = float(dict_toml[key]['rot_angle_yaw'])\n tx = float(dict_toml[key]['translation_x'])\n ty = float(dict_toml[key]['translation_y'])\n tz = float(dict_toml[key]['translation_z'])\n\n K = (fx, 0., cx, 0., fy, cy, 0., 0., 1.)\n R = (1., 0., 0., 0., 1., 0., 0., 0., 1.)\n P = (fx, 0., cx, 0., 0., fy, cy, 0., 0., 0., 1., 0.)\n size = (height, width)\n\n camera_param[camera_name] = CameraParam()\n camera_param[camera_name].set_camera_param(K, R, P, size)\n camera_param[camera_name].set_tf_rot_and_trans([roll, pitch, yaw], [tx, ty, tz])\n return camera_param\n\nclass CameraParam:\n def __init__(self):\n self._K = None\n self._R = None\n self._P = None\n self._shape = None\n self._transform_matrix = np.identity(4, dtype=np.float64)\n self.set_tf_matrix(self._transform_matrix)\n\n def set_camera_param(self, k, r, p, shape):\n self._K = k\n self._R = r\n self._P = p\n self._shape = shape\n\n def set_tf_rot_and_trans(self, rpy, xyz):\n tfm_mtx = np.identity(4, dtype=np.float64)\n tfm_mtx[:3,:3] = np.dot(self._axis_rot_mtx(2, rpy[2]), np.dot(self._axis_rot_mtx(1, rpy[1]), self._axis_rot_mtx(0, rpy[0])))\n tfm_mtx[:3,3] = np.array(xyz)\n self.set_tf_matrix(tfm_mtx)\n\n def _axis_rot_mtx(self, axis, deg):\n rad = deg / 180. 
* math.pi\n mtx = np.zeros((3,3), dtype=np.float64)\n i_0 = axis % 3\n i_1 = (axis + 1) % 3\n i_2 = (axis + 2) % 3\n mtx[i_0, i_0] = 1.\n mtx[i_1, i_1] = +math.cos(rad)\n mtx[i_1, i_2] = -math.sin(rad)\n mtx[i_2, i_1] = +math.sin(rad)\n mtx[i_2, i_2] = +math.cos(rad)\n return mtx\n\n def set_tf_matrix(self, matrix):\n assert matrix.shape == (4, 4)\n self._transform_matrix[:] = matrix[:]\n self._inv_rot_mtx = np.linalg.inv(self.rot_mtx)\n\n @property\n def translation(self):\n return self._transform_matrix[:3,3]\n\n @property\n def rot_mtx(self):\n return self._transform_matrix[:3,:3]\n\n @property\n def inv_rot_mtx(self):\n return self._inv_rot_mtx\n\n @property\n def height(self):\n return self._shape[0]\n\n @property\n def width(self):\n return self._shape[1]\n\n @property\n def shape(self):\n return self._shape\n\n @property\n def size(self):\n return self._shape[0] * self._shape[1]\n\n @property\n def focal_xy(self):\n return self._K[0], self._K[4]\n\n @property\n def center_xy(self):\n return self._K[2], self._K[5]\n"
] | [
[
"numpy.linalg.inv",
"numpy.array",
"numpy.identity",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chinjui/gym_torcs | [
"64260a1963cadfd9c528e3a694629049a0c20032"
] | [
"ActorNetwork.py"
] | [
"import numpy as np\nimport math\nfrom keras.initializers import normal, identity\nfrom keras.models import model_from_json\nfrom keras.models import Sequential, Model\n# from keras.engine.training import collect_trainable_weights\nfrom keras.layers import Dense, Flatten, Input, merge, Lambda\nfrom keras.optimizers import Adam\nimport tensorflow as tf\nimport keras.backend as K\n\nHIDDEN1_UNITS = 300\nHIDDEN2_UNITS = 600\n\nclass ActorNetwork(object):\n def __init__(self, sess, state_size, action_size, BATCH_SIZE, TAU, LEARNING_RATE):\n self.sess = sess\n self.BATCH_SIZE = BATCH_SIZE\n self.TAU = TAU\n self.LEARNING_RATE = LEARNING_RATE\n\n K.set_session(sess)\n\n #Now create the model\n self.model , self.weights, self.state = self.create_actor_network(state_size, action_size)\n self.target_model, self.target_weights, self.target_state = self.create_actor_network(state_size, action_size)\n self.action_gradient = tf.placeholder(tf.float32,[None, action_size])\n self.params_grad = tf.gradients(self.model.output, self.weights, -self.action_gradient)\n grads = zip(self.params_grad, self.weights)\n self.optimize = tf.train.AdamOptimizer(LEARNING_RATE).apply_gradients(grads)\n self.sess.run(tf.initialize_all_variables())\n\n def train(self, states, action_grads):\n self.sess.run(self.optimize, feed_dict={\n self.state: states,\n self.action_gradient: action_grads\n })\n\n def target_train(self):\n actor_weights = self.model.get_weights()\n actor_target_weights = self.target_model.get_weights()\n for i in xrange(len(actor_weights)):\n actor_target_weights[i] = self.TAU * actor_weights[i] + (1 - self.TAU)* actor_target_weights[i]\n self.target_model.set_weights(actor_target_weights)\n\n def create_actor_network(self, state_size,action_dim):\n print(\"Now we build the model\")\n S = Input(shape=[state_size])\n h0 = Dense(HIDDEN1_UNITS, activation='relu')(S)\n h1 = Dense(HIDDEN2_UNITS, activation='relu')(h0)\n # Steering = Dense(1,activation='tanh',init=lambda shape, name: 
normal(shape, scale=1e-4, name=name))(h1)\n Steering = Dense(1,activation='tanh')(h1)\n # Acceleration = Dense(1,activation='sigmoid',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)\n Acceleration = Dense(1,activation='sigmoid')(h1)\n #Brake = Dense(1,activation='sigmoid',init=lambda shape, name: normal(shape, scale=1e-4, name=name))(h1)\n Brake = Dense(1,activation='sigmoid')(h1)\n V = merge([Steering,Acceleration,Brake],mode='concat')\n model = Model(input=S,output=V)\n return model, model.trainable_weights, S\n\n"
] | [
[
"tensorflow.initialize_all_variables",
"tensorflow.gradients",
"tensorflow.placeholder",
"tensorflow.train.AdamOptimizer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
julianschumann/ae-opt | [
"611b6c893546267732a2d690df20a4cc238002e6"
] | [
"Compliance_minimization/Figure_6/autoencoder_training.py"
] | [
"import numpy as np\nfrom mpi4py import MPI\nfrom keras.models import Model\nfrom keras.layers import Input\nfrom keras.optimizers import SGD\nimport tensorflow as tf\nimport keras.backend as K \n\n## These functions are used to train specific neural netoworks\n## See Appendices A.6, A.7, and A.9 of my master thesis \n\n## Only the traing of the adversarial autoencoder with surrogate model will recieve code comments,\n## as the other traiing function are basically the same only with cutout parts\n\n\n \ndef train_AAE_surr(AE,Encoder,Discriminator,surrogate,X_rank,W_rank,C_rank,comm,rank,size,perrank,n_epochs):\n '''\n Trains an adversarial autoencoder with an additional surrogate model network and discriminator.\n\n Parameters\n ----------\n AE : Model\n Autoencoder constisting out of encoder and decoder.\n Encoder : Model\n Encoder part of the autoencoder AE.\n Discriminator : Model\n The disriminator network.\n surrogate : Model\n The surrogate model network.\n X_rank : perrank*nely*nelx float\n perrank training samples for the autoencoder.\n W_rank : perrank*nely*nelx float\n Weights assigned to each training sample.\n C_rank : perrank float\n The compliance of each training sample.\n comm : MPI_WORLD\n Envrionment to communicate with different processors.\n rank : int\n The processor used here.\n size : int\n Total number of processors.\n perrank : int\n Number of training samples in each rank.\n n_epochs : int\n Number of training epochs.\n\n Returns\n -------\n AE_weights : Model.weights\n Autoencoder parameters after the final epoch.\n\n ''' \n ## Combine the compliance values from all ranks\n if rank==0:\n C_rec=np.empty((size,perrank,1))\n else:\n C_rec=None\n comm.Barrier()\n comm.Gather(C_rank,C_rec,root=0)\n \n ## Determine overall maximum and minimum compliance of training samples and broadcast to all ranks\n if rank==0:\n C_min=np.min(C_rec)\n C_max=np.max(C_rec)\n else:\n C_min=None\n C_max=None\n C_min=comm.bcast(C_min,root=0)\n 
C_max=comm.bcast(C_max,root=0)\n \n ## Normalize the compliance values so that they belong to [0,1], allowing the surrogate model network to reproduce them \n C_rank=0.1+(C_rank-C_min)*0.8/(C_max-C_min)\n \n ## Build a model from Encoder and discriminator called Generator, where only the encoder is trainable\n Design_space=Input((X_rank.shape[1],X_rank.shape[2]))\n Latent_space=Input(Discriminator.input.shape[1])\n Disl=Model(Latent_space,Discriminator(Latent_space))\n Disl.trainable=False\n Discriminator.trainable=True\n Generator=Model(Design_space,Disl(Encoder(Design_space)))\n \n ## Build a combined model of Encoder and surrgate model network\n Surr=Model(Design_space,surrogate(Encoder(Design_space)))\n \n \n ## The likelyhood fro training discriminator or generator in each batch\n prob_dis=0.25\n \n ## The number of batches for each epoch and the corresponding batch size in each processor core (rank)\n num_batches=10\n batch_size_perrank=int(perrank/num_batches)\n \n \n ## Initialize parameters for using Adam when updating network parameters\n betam=0.9\n betav=0.999\n betamh=0.9\n betavh=0.999\n eta=0.001\n eta_surr=eta*prob_dis\n m=None\n v=None\n \n m_dis=None\n v_dis=None\n \n m_gen=None\n v_gen=None\n \n m_surr=None\n v_surr=None\n \n gen_start=False\n dis_start=False\n \n ## Initialize the optimizer. 
As Adam has to be implemented seperately, standard stochastic gradeint descent is used\n if rank==0:\n optimizer=SGD(learning_rate=eta,momentum=0.0) \n optimizer_surr=SGD(learning_rate=eta_surr,momentum=0.0)\n \n ## Prepare array of indecies for later reshuffleing \n Index=np.arange(perrank)\n \n ## Wait for all ranks to catch up\n comm.Barrier()\n \n ## Start training the networks\n for epoch in range(n_epochs): \n \n ##Generate a random order of training samples\n np.random.shuffle(Index)\n \n ## Determine if training with single batch should be performed for achieving convergence in later part of trainig\n if epoch+1>0.9*n_epochs:\n num_batches=1\n batch_size_perrank=perrank\n \n ## Start going through each separate batch\n for batch in range(num_batches):\n \n ## Get training samples and corresponding weights and compliance values for batch\n X_batch=np.copy(X_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n W_batch=np.copy(W_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n C_batch=np.copy(C_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n \n ## Update network parameters for all ranks so they are equal to rank=0 by broadcasting rank=0\n if rank==0:\n AE_weights=AE.get_weights()\n Discriminator_weights=Discriminator.get_weights()\n surrogate_weights=surrogate.get_weights()\n else:\n AE_weights=None\n Discriminator_weights=None\n surrogate_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n Discriminator_weights=comm.bcast(Discriminator_weights,root=0)\n surrogate_weights=comm.bcast(surrogate_weights,root=0)\n AE.set_weights(AE_weights)\n Discriminator.set_weights(Discriminator_weights)\n surrogate.set_weights(surrogate_weights)\n \n ## Get the gradient for encoder and decoder respective the reconstruction error (mean squared error)\n ## Get loss function\n with tf.GradientTape() as tape:\n X_batch_pred=AE(X_batch)\n loss_batch=K.mean((X_batch-X_batch_pred)**2*W_batch)/size\n ## Get 
gradient of loss function\n grad=np.array(tape.gradient(loss_batch,AE.trainable_weights),dtype=object) \n ## Combine gradients over all ranks\n Gradients=comm.gather(grad,root=0)\n if rank==0:\n Grad=np.sum(Gradients,0)\n \n ## Use Adam to update parameters of autoencoder in rank=0\n if epoch==0 and batch==0:\n m=(1-betam)*Grad \n v=(1-betav)*Grad*Grad\n else:\n m=betam*m+(1-betam)*Grad \n v=betav*v+(1-betav)*Grad*Grad\n mh=m/(1-betamh)\n vh=v/(1-betavh)\n grad_diff=(1/(vh**0.5+1e-8)*mh).tolist()\n optimizer.apply_gradients(zip(grad_diff,AE.trainable_weights))\n \n ## Broadcast rank=0 autoencoder parameters and update remaining ranks\n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n AE.set_weights(AE_weights)\n \n ## Wait for all ranks to catch up\n comm.Barrier()\n \n ## Train the surrogate model and encoder\n ## Get the loss function (mean squared error between rel and predicted compliance values)\n with tf.GradientTape() as tape:\n C_batch_pred=Surr(X_batch)\n loss_batch=K.mean((C_batch-C_batch_pred)**2)/size\n ## Get the gradient of this loss function of encoder and surrogate model network\n grad_surr=np.array(tape.gradient(loss_batch,Surr.trainable_weights),dtype=object)\n ## Combine gradients over all ranks in rank=0\n Gradients_surr=comm.gather(grad_surr,root=0)\n if rank==0:\n Grad_surr=np.sum(Gradients_surr,0)\n \n ## Use Adam to update parameters of networks in rank=0\n if epoch==0 and batch==0:\n m_surr=(1-betam)*Grad_surr \n v_surr=(1-betav)*Grad_surr*Grad_surr\n else:\n m_surr=betam*m_surr+(1-betam)*Grad_surr \n v_surr=betav*v_surr+(1-betav)*Grad_surr*Grad_surr\n mh_surr=m_surr/(1-betamh)\n vh_surr=v_surr/(1-betavh)\n betamh=betamh*betam\n betavh=betavh*betav\n grad_diff_surr=(1/(vh_surr**0.5+1e-8)*mh_surr).tolist()\n optimizer_surr.apply_gradients(zip(grad_diff_surr,Surr.trainable_weights)) \n \n ## Broadcast rank=0 network parameters and update remaining ranks\n if rank==0:\n 
Surr_weights=Surr.get_weights()\n else:\n Surr_weights=None\n Surr_weights=comm.bcast(Surr_weights,root=0)\n Surr.set_weights(Surr_weights)\n \n ## Wait for all ranks to catch up\n comm.Barrier()\n \n ## Generate random number in rank 0 and broadcast it to other ranks\n if rank==0:\n prob_rand=np.random.rand()\n else:\n prob_rand=None\n prob_rand=comm.bcast(prob_rand,root=0) \n \n ## Check if discriminator or generator have to be trained\n if np.abs(0.5-prob_rand)>(0.5-prob_dis):\n if prob_rand<prob_dis:\n ## Train the generator (that means only encoder parameters)\n ## Get loss function\n with tf.GradientTape() as tape:\n Gen_batch_pred=Generator(X_batch)\n loss_batch_gen=K.mean(K.log(1+1e-5-Gen_batch_pred))/size\n ## Get gradient of loss function in respect to network\n grad_gen=np.array(tape.gradient(loss_batch_gen,Generator.trainable_weights),dtype=object)\n ## Combine gradeints over all ranks in rank=0\n Gradients_gen=comm.gather(grad_gen,root=0)\n if rank==0:\n Grad_gen=np.sum(Gradients_gen,0)\n \n ## Use Adam to update parameters of generator(=encoder) in rank=0 \n if gen_start==False:\n m_gen=(1-betam)*Grad_gen \n v_gen=(1-betav)*Grad_gen*Grad_gen\n gen_start=True\n else:\n m_gen=betam*m_gen+(1-betam)*Grad_gen \n v_gen=betav*v_gen+(1-betav)*Grad_gen*Grad_gen\n mh_gen=m_gen/(1-betamh)\n vh_gen=v_gen/(1-betavh)\n grad_diff_gen=(1/(vh_gen**0.5+1e-8)*mh_gen).tolist()\n optimizer.apply_gradients(zip(grad_diff_gen,Generator.trainable_weights))\n \n if 1-prob_rand<prob_dis:\n ## Train the discriminaor\n ## Get encoded training samples\n Z_batch=Encoder.predict(X_batch)\n ## Get random samples generated according to desired latent space distribution (uniform)\n Z_rand=np.random.rand(Z_batch.shape[0],Z_batch.shape[1])\n \n ## Get loss function\n with tf.GradientTape() as tape:\n Dis_batch=Discriminator(Z_batch)\n Dis_rand=Discriminator(Z_rand)\n loss_batch_dis=(K.mean(K.log(Dis_batch+1e-5))+K.mean(K.log(1+1e-5-Dis_rand)))/(2*size)\n ## Get gradient of loss function 
in respect to disciminator parameters\n grad_dis=np.array(tape.gradient(loss_batch_dis,Discriminator.trainable_weights),dtype=object)\n ## Combine gradients over all ranks in rank=0\n Gradients_dis=comm.gather(grad_dis,root=0)\n if rank==0:\n Grad_dis=np.sum(Gradients_dis,0)\n \n ## Use Adam to update discriminator parameters\n if dis_start==False:\n m_dis=(1-betam)*Grad_dis \n v_dis=(1-betav)*Grad_dis*Grad_dis\n dis_start=True\n else:\n m_dis=betam*m_dis+(1-betam)*Grad_dis \n v_dis=betav*v_dis+(1-betav)*Grad_dis*Grad_dis\n mh_dis=m_dis/(1-betamh)\n vh_dis=v_dis/(1-betavh)\n grad_diff_dis=(1/(vh_dis**0.5+1e-8)*mh_dis).tolist()\n optimizer.apply_gradients(zip(grad_diff_dis,Discriminator.trainable_weights)) \n ## Wait for all ranks to catch up\n comm.Barrier()\n comm.Barrier()\n ## Update autoencoder network parameters once again (only really necessary if last batch included generator training) and brodcast\n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0) \n \n ## Return autoencoder parameters\n return AE_weights\n\ndef train_autoencoder(AE,X_rank,W_rank,comm,rank,size,perrank,n_epochs): \n '''\n Trains an autoencoder without any additional networks.\n\n Parameters\n ----------\n AE : Model\n Autoencoder constisting out of encoder and decoder.\n X_rank : perrank*nely*nelx float\n perrank training samples for the autoencoder.\n W_rank : perrank*nely*nelx float\n Weights assigned to each training sample.\n comm : MPI_WORLD\n Envrionment to communicate with different processors.\n rank : int\n The processor used here.\n size : int\n Total number of processors.\n perrank : int\n Number of training samples in each rank.\n n_epochs : int\n Number of training epochs.\n\n Returns\n -------\n AE_weights : Model.weights\n Autoencoder parameters after the final epoch.\n\n '''\n num_batches=10\n batch_size_perrank=int(perrank/num_batches)\n \n betam=0.9\n betav=0.999\n betamh=0.9\n betavh=0.999\n eta=0.001\n 
m=None\n v=None\n Index=np.arange(perrank)\n \n if rank==0:\n optimizer=SGD(learning_rate=eta,momentum=0.0)\n \n comm.Barrier()\n for epoch in range(n_epochs): \n np.random.shuffle(Index)\n if epoch+1>0.9*n_epochs:\n num_batches=1\n batch_size_perrank=perrank\n for batch in range(num_batches):\n X_batch=np.copy(X_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n W_batch=np.copy(W_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n AE.set_weights(AE_weights)\n \n with tf.GradientTape() as tape:\n X_batch_pred=AE(X_batch)\n loss_batch=K.mean((X_batch-X_batch_pred)**2*W_batch)/size\n grad=np.array(tape.gradient(loss_batch,AE.trainable_weights),dtype=object) \n Gradients=comm.gather(grad,root=0)\n if rank==0:\n Grad=np.sum(Gradients,0)\n if epoch==0 and batch==0:\n m=(1-betam)*Grad \n v=(1-betav)*Grad*Grad\n else:\n m=betam*m+(1-betam)*Grad \n v=betav*v+(1-betav)*Grad*Grad\n mh=m/(1-betamh)\n vh=v/(1-betavh)\n betamh=betamh*betam\n betavh=betavh*betav\n grad_diff=(1/(vh**0.5+1e-8)*mh).tolist()\n optimizer.apply_gradients(zip(grad_diff,AE.trainable_weights)) \n comm.Barrier()\n comm.Barrier()\n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n return AE_weights\n\ndef train_AAE(AE,Encoder,Discriminator,X_rank,W_rank,comm,rank,size,perrank,n_epochs): \n '''\n Trains an adversarial autoencoder which includes a discriminator network.\n\n Parameters\n ----------\n AE : Model\n Autoencoder constisting out of encoder and decoder.\n Encoder : Model\n Encoder part of the autoencoder AE.\n Discriminator : Model\n The disriminator network.\n X_rank : perrank*nely*nelx float\n perrank training samples for the autoencoder.\n W_rank : perrank*nely*nelx float\n Weights assigned to each training sample.\n comm : MPI_WORLD\n Envrionment to communicate with different 
processors.\n rank : int\n The processor used here.\n size : int\n Total number of processors.\n perrank : int\n Number of training samples in each rank.\n n_epochs : int\n Number of training epochs.\n\n Returns\n -------\n AE_weights : Model.weights\n Autoencoder parameters after the final epoch.\n\n '''\n Design_space=Input((X_rank.shape[1],X_rank.shape[2]))\n Latent_space=Input(Discriminator.input.shape[1])\n Disl=Model(Latent_space,Discriminator(Latent_space))\n Disl.trainable=False\n Discriminator.trainable=True\n Generator=Model(Design_space,Disl(Encoder(Design_space)))\n \n prob_dis=0.25\n \n num_batches=10\n batch_size_perrank=int(perrank/num_batches)\n \n betam=0.9\n betav=0.999\n betamh=0.9\n betavh=0.999\n eta=0.001\n m=None\n v=None\n \n m_dis=None\n v_dis=None\n \n m_gen=None\n v_gen=None\n \n gen_start=False\n dis_start=False\n \n Index=np.arange(perrank)\n if rank==0:\n optimizer=SGD(learning_rate=eta,momentum=0.0)\n \n comm.Barrier()\n for epoch in range(n_epochs): \n np.random.shuffle(Index)\n if epoch+1>0.9*n_epochs:\n num_batches=1\n batch_size_perrank=perrank\n for batch in range(num_batches):\n X_batch=np.copy(X_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n W_batch=np.copy(W_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n if rank==0:\n AE_weights=AE.get_weights()\n Discriminator_weights=Discriminator.get_weights()\n else:\n AE_weights=None\n Discriminator_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n Discriminator_weights=comm.bcast(Discriminator_weights,root=0)\n AE.set_weights(AE_weights)\n Discriminator.set_weights(Discriminator_weights)\n \n with tf.GradientTape() as tape:\n X_batch_pred=AE(X_batch)\n loss_batch=K.mean((X_batch-X_batch_pred)**2*W_batch)/size\n grad=np.array(tape.gradient(loss_batch,AE.trainable_weights),dtype=object)\n Gradients=comm.gather(grad,root=0)\n if rank==0:\n Grad=np.sum(Gradients,0)\n if epoch==0 and batch==0:\n m=(1-betam)*Grad \n 
v=(1-betav)*Grad*Grad\n else:\n m=betam*m+(1-betam)*Grad \n v=betav*v+(1-betav)*Grad*Grad\n mh=m/(1-betamh)\n vh=v/(1-betavh)\n betamh=betamh*betam\n betavh=betavh*betav\n grad_diff=(1/(vh**0.5+1e-8)*mh).tolist()\n optimizer.apply_gradients(zip(grad_diff,AE.trainable_weights)) \n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n AE.set_weights(AE_weights)\n comm.Barrier()\n if rank==0:\n prob_rand=np.random.rand()\n else:\n prob_rand=None\n prob_rand=comm.bcast(prob_rand,root=0) \n if np.abs(0.5-prob_rand)>(0.5-prob_dis):\n if prob_rand<prob_dis:\n with tf.GradientTape() as tape:\n Gen_batch_pred=Generator(X_batch)\n loss_batch_gen=K.mean(K.log(1+1e-5-Gen_batch_pred))/size\n grad_gen=np.array(tape.gradient(loss_batch_gen,Generator.trainable_weights),dtype=object)\n Gradients_gen=comm.gather(grad_gen,root=0)\n if rank==0:\n Grad_gen=np.sum(Gradients_gen,0)\n if gen_start==False:\n m_gen=(1-betam)*Grad_gen \n v_gen=(1-betav)*Grad_gen*Grad_gen\n gen_start=True\n else:\n m_gen=betam*m_gen+(1-betam)*Grad_gen \n v_gen=betav*v_gen+(1-betav)*Grad_gen*Grad_gen\n mh_gen=m_gen/(1-betamh)\n vh_gen=v_gen/(1-betavh)\n grad_diff_gen=(1/(vh_gen**0.5+1e-8)*mh_gen).tolist()\n optimizer.apply_gradients(zip(grad_diff_gen,Generator.trainable_weights))\n if 1-prob_rand<prob_dis:\n Z_batch=Encoder.predict(X_batch)\n Z_rand=np.random.rand(Z_batch.shape[0],Z_batch.shape[1])\n with tf.GradientTape() as tape:\n Dis_batch=Discriminator(Z_batch)\n Dis_rand=Discriminator(Z_rand)\n loss_batch_dis=(K.mean(K.log(Dis_batch+1e-5))+K.mean(K.log(1+1e-5-Dis_rand)))/(2*size)\n grad_dis=np.array(tape.gradient(loss_batch_dis,Discriminator.trainable_weights),dtype=object)\n Gradients_dis=comm.gather(grad_dis,root=0)\n if rank==0:\n Grad_dis=np.sum(Gradients_dis,0)\n if dis_start==False:\n m_dis=(1-betam)*Grad_dis \n v_dis=(1-betav)*Grad_dis*Grad_dis\n dis_start=True\n else:\n m_dis=betam*m_dis+(1-betam)*Grad_dis \n 
v_dis=betav*v_dis+(1-betav)*Grad_dis*Grad_dis\n mh_dis=m_dis/(1-betamh)\n vh_dis=v_dis/(1-betavh)\n grad_diff_dis=(1/(vh_dis**0.5+1e-8)*mh_dis).tolist()\n optimizer.apply_gradients(zip(grad_diff_dis,Discriminator.trainable_weights)) \n comm.Barrier()\n comm.Barrier()\n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0) \n return AE_weights\n\ndef train_autoencoder_surr(AE,Encoder,surrogate,X_rank,W_rank,C_rank,comm,rank,size,perrank,n_epochs): \n '''\n Trains an autoencoder with an additional surrogate model network.\n\n Parameters\n ----------\n AE : Model\n Autoencoder constisting out of encoder and decoder.\n Encoder : Model\n Encoder part of the autoencoder AE.\n surrogate : Model\n The surrogate model network.\n X_rank : perrank*nely*nelx float\n perrank training samples for the autoencoder.\n W_rank : perrank*nely*nelx float\n Weights assigned to each training sample.\n C_rank : perrank float\n The compliance of each training sample.\n comm : MPI_WORLD\n Envrionment to communicate with different processors.\n rank : int\n The processor used here.\n size : int\n Total number of processors.\n perrank : int\n Number of training samples in each rank.\n n_epochs : int\n Number of training epochs.\n\n Returns\n -------\n AE_weights : Model.weights\n Autoencoder parameters after the final epoch.\n\n '''\n if rank==0:\n C_rec=np.empty((size,perrank,1))\n else:\n C_rec=None\n comm.Barrier()\n comm.Gather(C_rank,C_rec,root=0)\n if rank==0:\n C_min=np.min(C_rec)\n C_max=np.max(C_rec)\n else:\n C_min=None\n C_max=None\n C_min=comm.bcast(C_min,root=0)\n C_max=comm.bcast(C_max,root=0)\n \n C_rank=0.1+(C_rank-C_min)*0.8/(C_max-C_min)\n \n \n Design_space=Input((X_rank.shape[1],X_rank.shape[2]))\n Surr=Model(Design_space,surrogate(Encoder(Design_space)))\n \n num_batches=10\n batch_size_perrank=int(perrank/num_batches)\n \n betam=0.9\n betav=0.999\n betamh=0.9\n betavh=0.999\n eta=0.001\n eta_surr=eta*0.25\n 
m=None\n v=None\n \n m_surr=None\n v_surr=None\n \n \n Index=np.arange(perrank)\n if rank==0:\n optimizer=SGD(learning_rate=eta,momentum=0.0) \n optimizer_surr=SGD(learning_rate=eta_surr,momentum=0.0)\n \n comm.Barrier()\n for epoch in range(n_epochs): \n np.random.shuffle(Index)\n if epoch+1>0.9*n_epochs:\n num_batches=1\n batch_size_perrank=perrank\n for batch in range(num_batches):\n X_batch=np.copy(X_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n W_batch=np.copy(W_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n C_batch=np.copy(C_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])\n if rank==0:\n AE_weights=AE.get_weights()\n surrogate_weights=surrogate.get_weights()\n else:\n AE_weights=None\n surrogate_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n surrogate_weights=comm.bcast(surrogate_weights,root=0)\n AE.set_weights(AE_weights)\n surrogate.set_weights(surrogate_weights)\n \n with tf.GradientTape() as tape:\n X_batch_pred=AE(X_batch)\n loss_batch=K.mean((X_batch-X_batch_pred)**2*W_batch)/size\n grad=np.array(tape.gradient(loss_batch,AE.trainable_weights),dtype=object) \n Gradients=comm.gather(grad,root=0)\n if rank==0:\n Grad=np.sum(Gradients,0)\n if epoch==0 and batch==0:\n m=(1-betam)*Grad \n v=(1-betav)*Grad*Grad\n else:\n m=betam*m+(1-betam)*Grad \n v=betav*v+(1-betav)*Grad*Grad\n mh=m/(1-betamh)\n vh=v/(1-betavh)\n grad_diff=(1/(vh**0.5+1e-8)*mh).tolist()\n optimizer.apply_gradients(zip(grad_diff,AE.trainable_weights)) \n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0)\n AE.set_weights(AE_weights)\n comm.Barrier()\n #Surrogate\n with tf.GradientTape() as tape:\n C_batch_pred=Surr(X_batch)\n loss_batch=K.mean((C_batch-C_batch_pred)**2)/size\n grad_surr=np.array(tape.gradient(loss_batch,Surr.trainable_weights),dtype=object) \n Gradients_surr=comm.gather(grad_surr,root=0)\n if rank==0:\n 
Grad_surr=np.sum(Gradients_surr,0)\n if epoch==0 and batch==0:\n m_surr=(1-betam)*Grad_surr \n v_surr=(1-betav)*Grad_surr*Grad_surr\n else:\n m_surr=betam*m_surr+(1-betam)*Grad_surr \n v_surr=betav*v_surr+(1-betav)*Grad_surr*Grad_surr\n mh_surr=m_surr/(1-betamh)\n vh_surr=v_surr/(1-betavh)\n betamh=betamh*betam\n betavh=betavh*betav\n grad_diff_surr=(1/(vh_surr**0.5+1e-8)*mh_surr).tolist()\n optimizer_surr.apply_gradients(zip(grad_diff_surr,Surr.trainable_weights)) \n if rank==0:\n Surr_weights=Surr.get_weights()\n else:\n Surr_weights=None\n Surr_weights=comm.bcast(Surr_weights,root=0)\n Surr.set_weights(Surr_weights)\n comm.Barrier()\n comm.Barrier()\n if rank==0:\n AE_weights=AE.get_weights()\n else:\n AE_weights=None\n AE_weights=comm.bcast(AE_weights,root=0) \n return AE_weights"
] | [
[
"numpy.abs",
"numpy.min",
"numpy.arange",
"numpy.random.shuffle",
"numpy.max",
"numpy.copy",
"numpy.random.rand",
"numpy.sum",
"numpy.empty",
"tensorflow.GradientTape"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"2.5",
"2.8",
"2.10"
]
}
] |
nsahoo/zfit | [
"fcad2578f31138f5383f7fa5de6c0f8c6b1dbaa4"
] | [
"zfit/core/data.py"
] | [
"# Copyright (c) 2021 zfit\nimport warnings\nfrom collections import OrderedDict\nfrom contextlib import ExitStack\nfrom typing import Callable, Dict, List, Mapping, Tuple, Union\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport uproot\nfrom tensorflow.python.ops import array_ops\n\n# from ..settings import types as ztypes\nimport zfit\nimport zfit.z.numpy as znp\n\nfrom .. import z\nfrom ..settings import ztypes\nfrom ..util import ztyping\nfrom ..util.cache import GraphCachable, invalidate_graph\nfrom ..util.container import convert_to_container\nfrom ..util.exception import (LogicalUndefinedOperationError,\n ObsIncompatibleError, ShapeIncompatibleError,\n WorkInProgressError)\nfrom ..util.temporary import TemporarilySet\nfrom .baseobject import BaseObject\nfrom .coordinates import convert_to_obs_str\nfrom .dimension import BaseDimensional\nfrom .interfaces import ZfitSpace, ZfitUnbinnedData\nfrom .tensorlike import register_tensor_conversion, OverloadableMixin\nfrom .space import Space, convert_to_space\n\n\n# TODO: make cut only once, then remember\nclass Data(GraphCachable, ZfitUnbinnedData, BaseDimensional, BaseObject, OverloadableMixin):\n BATCH_SIZE = 1000000 # 1 mio\n\n def __init__(self, dataset: Union[tf.data.Dataset, \"LightDataset\"], obs: ztyping.ObsTypeInput = None,\n name: str = None, weights=None, iterator_feed_dict: Dict = None,\n dtype: tf.DType = None):\n \"\"\"Create a data holder from a `dataset` used to feed into `models`.\n\n Args:\n dataset: A dataset storing the actual values\n obs: Observables where the data is defined in\n name: Name of the `Data`\n iterator_feed_dict:\n dtype: |dtype_arg_descr|\n \"\"\"\n if name is None:\n name = \"Data\"\n if dtype is None:\n dtype = ztypes.float\n super().__init__(name=name)\n # if iterator_feed_dict is None:\n # iterator_feed_dict = {}\n self._permutation_indices_data = None\n self._next_batch = None\n self._dtype = dtype\n self._nevents = None\n self._weights = None\n\n 
self._data_range = None\n self._set_space(obs)\n self._original_space = self.space\n self._data_range = self.space # TODO proper data cuts: currently set so that the cuts in all dims are applied\n self.batch_size = self.BATCH_SIZE\n self.dataset = dataset.batch(self.batch_size)\n self._name = name\n self.iterator_feed_dict = iterator_feed_dict\n self.iterator = None\n self.set_weights(weights=weights)\n\n @property\n def nevents(self):\n nevents = self._nevents\n if nevents is None:\n nevents = self._get_nevents()\n return nevents\n\n # TODO: which naming? nevents or n_events\n\n @property\n def _approx_nevents(self):\n return self.nevents\n\n @property\n def n_events(self):\n return self.nevents\n\n @property\n def has_weights(self):\n return self._weights is not None\n\n @property\n def dtype(self):\n return self._dtype\n\n def _set_space(self, obs: Space):\n obs = convert_to_space(obs)\n self._check_n_obs(space=obs)\n obs = obs.with_autofill_axes(overwrite=True)\n self._space = obs\n\n @property\n def data_range(self):\n data_range = self._data_range\n if data_range is None:\n data_range = self.space\n return data_range\n\n @invalidate_graph\n def set_data_range(self, data_range):\n data_range = self._check_input_data_range(data_range=data_range)\n\n def setter(value):\n self._data_range = value\n\n def getter():\n return self._data_range\n\n return TemporarilySet(value=data_range, setter=setter, getter=getter)\n\n @property\n def weights(self):\n # TODO: refactor below more general, when to apply a cut?\n if self.data_range.has_limits and self.has_weights:\n raw_values = self._value_internal(obs=self.data_range.obs, filter=False)\n is_inside = self.data_range.inside(raw_values)\n weights = self._weights[is_inside]\n else:\n weights = self._weights\n return weights\n\n @invalidate_graph\n def set_weights(self, weights: ztyping.WeightsInputType):\n \"\"\"Set (temporarily) the weights of the dataset.\n\n Args:\n weights:\n \"\"\"\n if weights is not None:\n 
weights = z.convert_to_tensor(weights)\n weights = z.to_real(weights)\n if weights.shape.ndims != 1:\n raise ShapeIncompatibleError(\"Weights have to be 1-Dim objects.\")\n\n def setter(value):\n self._weights = value\n\n def getter():\n return self.weights\n\n return TemporarilySet(value=weights, getter=getter, setter=setter)\n\n @property\n def space(self) -> \"ZfitSpace\":\n return self._space\n\n # constructors\n @classmethod\n def from_root_iter(cls, path, treepath, branches=None, entrysteps=None, name=None, **kwargs):\n # branches = convert_to_container(branches)\n raise RuntimeWarning(\"Currently, this is not supported.\")\n\n @classmethod\n def from_root(cls, path: str, treepath: str, branches: List[str] = None, branches_alias: Dict = None,\n weights: ztyping.WeightsStrInputType = None,\n name: str = None,\n dtype: tf.DType = None,\n root_dir_options=None) -> \"Data\":\n \"\"\"Create a `Data` from a ROOT file. Arguments are passed to `uproot`.\n\n The arguments are passed to uproot directly.\n\n Args:\n path:\n treepath:\n branches:\n branches_alias: A mapping from the `branches` (as keys) to the actual `observables` (as values).\n This allows to have different `observable` names, independent of the branch name in the file.\n weights: Weights of the data. Has to be 1-D and match the shape\n of the data (nevents). 
Can be a column of the ROOT file by using a string corresponding to a\n column.\n name:\n root_dir_options:\n\n Returns:\n `zfit.Data`:\n \"\"\"\n # TODO 0.6: use obs here instead of branches\n # if branches:\n # warnings.warn(FutureWarning(\"`branches` is deprecated, please use `obs` instead\"), stacklevel=2)\n # obs = branches\n # obs = convert_to_space(obs)\n # branches = obs.obs\n if branches_alias is None and branches is None:\n raise ValueError(\"Either branches or branches_alias has to be specified.\")\n\n if branches_alias is None:\n branches_alias = {}\n if branches is None:\n branches = list(branches_alias.values())\n\n weights_are_branch = isinstance(weights, str)\n\n branches = convert_to_container(branches)\n if root_dir_options is None:\n root_dir_options = {}\n\n def uproot_loader():\n with uproot.open(path, **root_dir_options)[treepath] as root_tree:\n if weights_are_branch:\n branches_with_weights = branches + [weights]\n else:\n branches_with_weights = branches\n branches_with_weights = tuple(branches_with_weights)\n data = root_tree.arrays(expressions=branches_with_weights, library='pd')\n data_np = data[branches].values\n if weights_are_branch:\n weights_np = data[weights]\n else:\n weights_np = None\n return data_np, weights_np\n\n data, weights_np = uproot_loader()\n if not weights_are_branch:\n weights_np = weights\n dataset = LightDataset.from_tensor(data)\n\n # dataset = dataset.repeat()\n obs = [branches_alias.get(branch, branch) for branch in branches]\n return Data(dataset=dataset, obs=obs, weights=weights_np, name=name, dtype=dtype)\n\n @classmethod\n def from_pandas(cls, df: pd.DataFrame, obs: ztyping.ObsTypeInput = None, weights: ztyping.WeightsInputType = None,\n name: str = None, dtype: tf.DType = None):\n \"\"\"Create a `Data` from a pandas DataFrame. If `obs` is `None`, columns are used as obs.\n\n Args:\n df:\n weights: Weights of the data. 
Has to be 1-D and match the shape\n of the data (nevents).\n obs:\n name:\n \"\"\"\n if obs is None:\n obs = list(df.columns)\n array = df.values\n return cls.from_numpy(obs=obs, array=array, weights=weights, name=name, dtype=dtype)\n\n @classmethod\n def from_numpy(cls, obs: ztyping.ObsTypeInput, array: np.ndarray, weights: ztyping.WeightsInputType = None,\n name: str = None, dtype: tf.DType = None):\n \"\"\"Create `Data` from a `np.array`.\n\n Args:\n obs:\n array:\n name:\n\n Returns:\n \"\"\"\n\n if not isinstance(array, (np.ndarray)) and not (tf.is_tensor(array) and hasattr(array, 'numpy')):\n raise TypeError(f\"`array` has to be a `np.ndarray`. Is currently {type(array)}\")\n if dtype is None:\n dtype = ztypes.float\n tensor = tf.cast(array, dtype=dtype)\n return cls.from_tensor(obs=obs, tensor=tensor, weights=weights, name=name, dtype=dtype)\n\n @classmethod\n def from_tensor(cls, obs: ztyping.ObsTypeInput, tensor: tf.Tensor, weights: ztyping.WeightsInputType = None,\n name: str = None, dtype: tf.DType = None) -> \"Data\":\n \"\"\"Create a `Data` from a `tf.Tensor`. `Value` simply returns the tensor (in the right order).\n\n Args:\n obs:\n tensor:\n name:\n\n Returns:\n \"\"\"\n # dataset = LightDataset.from_tensor(tensor=tensor)\n if dtype is None:\n dtype = ztypes.float\n tensor = tf.cast(tensor, dtype=dtype)\n if len(tensor.shape) == 0:\n tensor = znp.expand_dims(tensor, -1)\n if len(tensor.shape) == 1:\n tensor = znp.expand_dims(tensor, -1)\n # dataset = tf.data.Dataset.from_tensor_slices(tensor)\n dataset = LightDataset.from_tensor(tensor)\n\n return Data(dataset=dataset, obs=obs, name=name, weights=weights, dtype=dtype)\n\n def with_obs(self, obs):\n values = self.value(obs)\n return type(self).from_tensor(obs=self.space, tensor=values, weights=self.weights, name=self.name)\n\n def to_pandas(self, obs: ztyping.ObsTypeInput = None):\n \"\"\"Create a `pd.DataFrame` from `obs` as columns and return it.\n\n Args:\n obs: The observables to use as columns. 
If `None`, all observables are used.\n\n Returns:\n \"\"\"\n values = self.value(obs=obs)\n if obs is None:\n obs = self.obs\n obs_str = convert_to_obs_str(obs)\n values = values.numpy()\n df = pd.DataFrame(data=values, columns=obs_str)\n return df\n\n def unstack_x(self, obs: ztyping.ObsTypeInput = None, always_list: bool = False):\n \"\"\"Return the unstacked data: a list of tensors or a single Tensor.\n\n Args:\n obs: which observables to return\n always_list: If True, always return a list (also if length 1)\n\n Returns:\n List(tf.Tensor)\n \"\"\"\n return z.unstack_x(self.value(obs=obs))\n\n def value(self, obs: ztyping.ObsTypeInput = None):\n return znp.asarray(self._value_internal(obs=obs))\n # TODO: proper iterations\n # value_iter = self._value_internal(obs=obs)\n # value = next(value_iter)\n # try:\n # next(value_iter)\n # except StopIteration: # it's ok, we're not batched\n # return value\n # else:\n # raise DataIsBatchedError(\n # f\"Data {self} is batched, cannot return only the value. Iterate through it (WIP, make\"\n # f\"an issue on Github if this feature is needed now)\")\n\n def numpy(self):\n return self.value().numpy()\n\n def _cut_data(self, value, obs=None):\n if self.data_range.has_limits:\n data_range = self.data_range.with_obs(obs=obs)\n value = data_range.filter(value)\n\n return value\n\n def _value_internal(self, obs: ztyping.ObsTypeInput = None, filter: bool = True):\n if obs is not None:\n obs = convert_to_obs_str(obs)\n # for raw_value in self.dataset:\n # value = self._check_convert_value(raw_value)\n value = self.dataset.value()\n if filter:\n value = self._cut_data(value, obs=self._original_space.obs)\n value_sorted = self._sort_value(value=value, obs=obs)\n return value_sorted\n\n def _check_convert_value(self, value):\n # TODO(Mayou36): add conversion to right dimension? (n_events, n_obs)? 
# check if 1-D?\n if len(value.shape.as_list()) == 0:\n value = znp.expand_dims(value, -1)\n if len(value.shape.as_list()) == 1:\n value = znp.expand_dims(value, -1)\n\n # cast data to right type\n if value.dtype != self.dtype:\n value = tf.cast(value, dtype=self.dtype)\n return value\n\n def _sort_value(self, value, obs: Tuple[str]):\n obs = convert_to_container(value=obs, container=tuple)\n # TODO CURRENT: deactivated below!\n perm_indices = self.space.axes if self.space.axes != tuple(range(value.shape[-1])) else False\n\n # permutate = perm_indices is not None\n if obs:\n if not frozenset(obs) <= frozenset(self.obs):\n raise ValueError(\"The observable(s) {} are not contained in the dataset. \"\n \"Only the following are: {}\".format(frozenset(obs) - frozenset(self.obs),\n self.obs))\n perm_indices = self.space.get_reorder_indices(obs=obs)\n # values = list(values[self.obs.index(o)] for o in obs if o in self.obs)\n if perm_indices:\n value = z.unstack_x(value, always_list=True)\n value = [value[i] for i in perm_indices]\n value = z.stack_x(value)\n\n return value\n\n # TODO(Mayou36): use Space to permute data?\n # TODO(Mayou36): raise error is not obs <= self.obs?\n @invalidate_graph\n def sort_by_axes(self, axes: ztyping.AxesTypeInput, allow_superset: bool = True):\n if not allow_superset:\n if not frozenset(axes) <= frozenset(self.axes):\n raise ValueError(\"The observable(s) {} are not contained in the dataset. 
\"\n \"Only the following are: {}\".format(frozenset(axes) - frozenset(self.axes),\n self.axes))\n space = self.space.with_axes(axes=axes, allow_subset=True)\n\n def setter(value):\n self._space = value\n\n def getter():\n return self.space\n\n return TemporarilySet(value=space, setter=setter, getter=getter)\n\n @invalidate_graph\n def sort_by_obs(self, obs: ztyping.ObsTypeInput, allow_superset: bool = False):\n if not allow_superset:\n if not frozenset(obs) <= frozenset(self.obs):\n raise ValueError(\"The observable(s) {} are not contained in the dataset. \"\n \"Only the following are: {}\".format(frozenset(obs) - frozenset(self.obs),\n self.obs))\n\n space = self.space.with_obs(obs=obs, allow_subset=True, allow_superset=allow_superset)\n\n def setter(value):\n self._space = value\n\n def getter():\n return self.space\n\n return TemporarilySet(value=space, setter=setter, getter=getter)\n\n # def _dense_var_to_tensor(self, dtype=None, name=None, as_ref=False):\n # del name\n # if dtype is not None:\n # if dtype != self.dtype:\n # return NotImplemented\n # if as_ref:\n # # return \"NEVER READ THIS\"\n # raise LogicalUndefinedOperationError(\"There is no ref for the `Data`\")\n # else:\n # return self.value()\n #\n # def _AsTensor(self):\n # return self.value()\n #\n # @staticmethod\n # def _OverloadAllOperators(): # pylint: disable=invalid-name\n # \"\"\"Register overloads for all operators.\"\"\"\n # for operator in tf.Tensor.OVERLOADABLE_OPERATORS:\n # Data._OverloadOperator(operator)\n # # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # # instead)\n # # pylint: disable=protected-access\n # setattr(Data, \"__getitem__\", array_ops._SliceHelperVar)\n #\n # @staticmethod\n # def _OverloadOperator(operator): # pylint: disable=invalid-name\n # \"\"\"Defer an operator overload to `ops.Tensor`.\n\n # We pull the operator out of ops.Tensor dynamically to avoid ordering issues.\n # Args:\n # operator: string. 
The operator name.\n # \"\"\"\n #\n # tensor_oper = getattr(tf.Tensor, operator)\n #\n # def _run_op(a, *args):\n # # pylint: disable=protected-access\n # value = a._AsTensor()\n # return tensor_oper(value, *args)\n #\n # # Propagate __doc__ to wrapper\n # try:\n # _run_op.__doc__ = tensor_oper.__doc__\n # except AttributeError:\n # pass\n #\n # setattr(Data, operator, _run_op)\n\n def _check_input_data_range(self, data_range):\n data_range = self._convert_sort_space(limits=data_range)\n if not frozenset(self.data_range.obs) == frozenset(data_range.obs):\n raise ObsIncompatibleError(f\"Data range has to cover the full observable space {self.data_range.obs}, not \"\n f\"only {data_range.obs}\")\n return data_range\n\n # TODO(Mayou36): refactor with pdf or other range things?\n def _convert_sort_space(self, obs: ztyping.ObsTypeInput = None, axes: ztyping.AxesTypeInput = None,\n limits: ztyping.LimitsTypeInput = None) -> Union[Space, None]:\n \"\"\"Convert the inputs (using eventually `obs`, `axes`) to\n :py:class:`~zfit.Space` and sort them according to own `obs`.\n\n Args:\n obs:\n axes:\n limits:\n\n Returns:\n \"\"\"\n if obs is None: # for simple limits to convert them\n obs = self.obs\n space = convert_to_space(obs=obs, axes=axes, limits=limits)\n\n if self.space is not None:\n space = space.with_coords(self.space, allow_subset=True)\n return space\n\n def _get_nevents(self):\n return tf.shape(input=self.value())[0]\n\n def __str__(self) -> str:\n return f'<zfit.Data: {self.name} obs={self.obs}>'\n\n def to_binned(self, space):\n from zfit._data.binneddatav1 import BinnedData\n return BinnedData.from_unbinned(space=space, data=self)\n\n\nclass SampleData(Data):\n _cache_counting = 0\n\n def __init__(self, dataset: Union[tf.data.Dataset, \"LightDataset\"],\n obs: ztyping.ObsTypeInput = None, weights=None, name: str = None,\n dtype: tf.DType = ztypes.float):\n super().__init__(dataset, obs, name=name, weights=weights, iterator_feed_dict=None, dtype=dtype)\n\n 
@classmethod\n def get_cache_counting(cls):\n counting = cls._cache_counting\n cls._cache_counting += 1\n return counting\n\n @classmethod\n def from_sample(cls, sample: tf.Tensor, obs: ztyping.ObsTypeInput, name: str = None,\n weights=None):\n dataset = LightDataset.from_tensor(sample)\n return SampleData(dataset=dataset, obs=obs, name=name, weights=weights)\n\n\nclass Sampler(Data):\n _cache_counting = 0\n\n def __init__(self, dataset: \"LightDataset\", sample_func: Callable, sample_holder: tf.Variable,\n n: Union[ztyping.NumericalScalarType, Callable], weights=None,\n fixed_params: Dict[\"zfit.Parameter\", ztyping.NumericalScalarType] = None,\n obs: ztyping.ObsTypeInput = None, name: str = None,\n dtype: tf.DType = ztypes.float):\n\n super().__init__(dataset=dataset, obs=obs, name=name, weights=weights, iterator_feed_dict=None, dtype=dtype)\n if fixed_params is None:\n fixed_params = OrderedDict()\n if isinstance(fixed_params, (list, tuple)):\n fixed_params = OrderedDict((param, param.numpy()) for param in fixed_params) # TODO: numpy -> read_value?\n\n self._initial_resampled = False\n\n self.fixed_params = fixed_params\n self.sample_holder = sample_holder\n self.sample_func = sample_func\n self.n = n\n self._n_holder = n\n self.resample() # to be used for precompilations etc\n\n @property\n def n_samples(self):\n return self._n_holder\n\n @property\n def _approx_nevents(self):\n nevents = super()._approx_nevents\n if nevents is None:\n nevents = self.n\n return nevents\n\n def _value_internal(self, obs: ztyping.ObsTypeInput = None, filter: bool = True):\n if not self._initial_resampled:\n raise RuntimeError(\n \"No data generated yet. 
Use `resample()` to generate samples or directly use `model.sample()`\"\n \"for single-time sampling.\")\n return super()._value_internal(obs=obs, filter=filter)\n\n @classmethod\n def get_cache_counting(cls):\n counting = cls._cache_counting\n cls._cache_counting += 1\n return counting\n\n @classmethod\n def from_sample(cls, sample_func: Callable, n: ztyping.NumericalScalarType, obs: ztyping.ObsTypeInput,\n fixed_params=None, name: str = None, weights=None, dtype=None):\n obs = convert_to_space(obs)\n\n if fixed_params is None:\n fixed_params = []\n if dtype is None:\n dtype = ztypes.float\n # from tensorflow.python.ops.variables import VariableV1\n sample_holder = tf.Variable(initial_value=sample_func(), dtype=dtype, trainable=False, # HACK: sample_func\n # validate_shape=False,\n shape=(None, obs.n_obs),\n name=f\"sample_data_holder_{cls.get_cache_counting()}\")\n dataset = LightDataset.from_tensor(sample_holder)\n\n return Sampler(dataset=dataset, sample_holder=sample_holder, sample_func=sample_func, fixed_params=fixed_params,\n n=n, obs=obs, name=name, weights=weights)\n\n def resample(self, param_values: Mapping = None, n: Union[int, tf.Tensor] = None):\n \"\"\"Update the sample by newly sampling. This affects any object that used this data already.\n\n All params that are not in the attribute `fixed_params` will use their current value for\n the creation of the new sample. The value can also be overwritten for one sampling by providing\n a mapping with `param_values` from `Parameter` to the temporary `value`.\n\n Args:\n param_values: a mapping from :py:class:`~zfit.Parameter` to a `value`. For the current sampling,\n `Parameter` will use the `value`.\n n: the number of samples to produce. 
If the `Sampler` was created with\n anything else then a numerical or tf.Tensor, this can't be used.\n \"\"\"\n if n is None:\n n = self.n\n\n temp_param_values = self.fixed_params.copy()\n if param_values is not None:\n temp_param_values.update(param_values)\n\n with ExitStack() as stack:\n\n _ = [stack.enter_context(param.set_value(val)) for param, val in temp_param_values.items()]\n\n # if not (n and self._initial_resampled): # we want to load and make sure that it's initialized\n # # means it's handled inside the function\n # # TODO(Mayou36): check logic; what if new_samples loaded? get's overwritten by initializer\n # # fixed with self.n, needs cleanup\n # if not (isinstance(self.n_samples, str) or self.n_samples is None):\n # self.sess.run(self.n_samples.initializer)\n # if n:\n # if not isinstance(self.n_samples, tf.Variable):\n # raise RuntimeError(\"Cannot set a new `n` if not a Tensor-like object was given\")\n # self.n_samples.assign(n)\n\n new_sample = self.sample_func(n)\n # self.sample_holder.assign(new_sample)\n self.sample_holder.assign(new_sample, read_value=False)\n self._initial_resampled = True\n\n def __str__(self) -> str:\n return f'<Sampler: {self.name} obs={self.obs}>'\n\n\nregister_tensor_conversion(Data, name='Data', overload_operators=True)\n\n\nclass LightDataset:\n\n def __init__(self, tensor):\n if not isinstance(tensor, (tf.Tensor, tf.Variable)):\n tensor = z.convert_to_tensor(tensor)\n self.tensor = tensor\n\n def batch(self, batch_size): # ad-hoc just empty\n return self\n\n def __iter__(self):\n yield self.value()\n\n @classmethod\n def from_tensor(cls, tensor):\n return cls(tensor=tensor)\n\n def value(self):\n return self.tensor\n\n\ndef sum_samples(sample1: ZfitUnbinnedData, sample2: ZfitUnbinnedData, obs: ZfitSpace, shuffle: bool = False):\n samples = [sample1, sample2]\n if obs is None:\n raise WorkInProgressError\n sample2 = sample2.value(obs=obs)\n if shuffle:\n sample2 = tf.random.shuffle(sample2)\n sample1 = 
sample1.value(obs=obs)\n tensor = sample1 + sample2\n if any([s.weights is not None for s in samples]):\n raise WorkInProgressError(\"Cannot combine weights currently\")\n weights = None\n\n return SampleData.from_sample(sample=tensor, obs=obs, weights=weights)\n"
] | [
[
"tensorflow.random.shuffle",
"tensorflow.cast",
"tensorflow.is_tensor",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": [
"2.3",
"2.2",
"2.4"
]
}
] |
boehmv/UPF3 | [
"9a3ce9de98cd7e9722ff7de55a783f86c7eef7c0"
] | [
"tools/leafcutter/xlsx_combine_leafcutter.py"
] | [
"#!/usr/bin/python3\nimport pandas as pd\nimport numpy as np\nimport xlsxwriter\nimport glob\nfrom functools import reduce\n\ndef get_col_widths(dataframe):\n # First we find the maximum length of the index column \n idx_max = max([len(str(s)) for s in dataframe.index.values] + [len(str(dataframe.index.name))])\n # Then, we concatenate this to the max of the lengths of column name and its values for each column, left to right\n return [idx_max] + [max([len(str(s)) for s in dataframe[col].values] + [len(col)]) for col in dataframe.columns]\n\n# importing sys module\nimport sys\n\n# Import first argument as IRFinder/Combined folder\nmyfolder = sys.argv[1]\n\n# Import third argument as condition string, gets converted to characters\ncond=sys.argv[3]\ncond=list(cond.split(\",\"))\n\n# reading second argument as final output file \nwith open(sys.argv[2], 'wb') as outf:\n\n # Define final data frame as reference to align all data to\n df_final = pd.DataFrame(columns=['uniqueID', 'gene_name', 'gene_id', 'altgenes'])\n \n # Get all conditions \n print('# of conditions:')\n print(len(cond))\n print('Conditions:')\n print(cond)\n\n # Loop over each condition in leafcutter/Combined folder\n for f in cond[1:]:\n myfilename=('{folder}/control_vs_{condition}_final.xlsx'.format(folder=myfolder, condition=f))\n df = pd.read_excel(myfilename)\n\n # Report data before filtering\n print('# rows before filtering:')\n print(len(df.index)) \n \n # Define uniqueID\n df[\"uniqueID\"] = df['chr'].map(str) + \":\" + df['start'].map(str) + \"-\" + df['end'].map(str) + \":\" + df['gene_id'].map(str)\n\n #drop unnecessary columns\n df = df.drop(columns=['Unnamed: 0', 'chr', 'start', 'end', 'logef', 'status', 'loglr', 'df', 'p'])\n condition=f\n # Reorder columns, split coordinates and give p.adjust and log2FoldChange condition names\n df = df[['uniqueID', 'gene_name', 'gene_id', 'altgenes', 'deltapsi', 'p.adjust', 'control', condition]]\n df.columns = ['uniqueID', 'gene_name', 'gene_id', 
'altgenes', \"%s_dPSI\" % condition, \"%s_p.adjust\" % condition, \"PSI_control_%s\" % condition, \"PSI_%s\" % condition,]\n df_filtered = df[(df[\"%s_p.adjust\" % condition] < 0.05)]\n print('# rows after filtering')\n print(len(df_filtered.index))\n df_final = pd.merge(df_final,df_filtered,on=['uniqueID', 'gene_name', 'gene_id', 'altgenes'], how='outer')\n print('# rows of final df')\n print(len(df_final.index))\n print(\"Condition %s processed\" % condition)\n\n # Get proper coordinates\n IDs = df_final[\"uniqueID\"].str.split(\":\", n = 2, expand = True)\n Coords = IDs[1].str.split(\"-\", expand = True)\n df_final[\"coordinates\"] = IDs[0] +':'+ Coords[0] +'-'+ Coords[1]\n cols = df_final.columns.tolist()\n cols = cols[-1:] + cols[:-1]\n df_final = df_final[cols] \n\n # Get table range\n end_row = len(df_final.index)\n end_column = len(df_final.columns)-1\n cell_range = xlsxwriter.utility.xl_range(0, 0, end_row, end_column)\n\n #write to Excel file\n writer = pd.ExcelWriter(outf, engine='xlsxwriter')\n df_final.to_excel(writer, sheet_name='total', index=False)\n workbook = writer.book\n worksheet = writer.sheets['total']\n\n # Hack for preserving column headers when inserting table\n header = [{'header': di} for di in df_final.columns.tolist()]\n worksheet.add_table(cell_range,{'header_row': True,'columns':header})\n\n # Formating the output excel file\n worksheet.set_zoom(100)\n for i, width in enumerate(get_col_widths(df_final)):\n worksheet.set_column(i, i, width)\n worksheet.set_column(0, 0, 25)\n worksheet.set_column(1, 1, 10)\n worksheet.set_column(2, 2, 15)\n worksheet.set_column(3, 3, 20)\n worksheet.set_column(4, 4, 10)\n for f in range(5,end_column-1):\n worksheet.conditional_format(0, f, end_row-1, f, {'type':'3_color_scale', 'min_color': \"red\", 'mid_color': \"white\", 'max_color': \"green\"})\n writer.save()\n"
] | [
[
"pandas.merge",
"pandas.read_excel",
"pandas.DataFrame",
"pandas.ExcelWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
scsven/faiss | [
"0c18622fba0a5312567380fd7df28d971f4f57d1"
] | [
"python/faiss.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n#@nolint\n\n# not linting this file because it imports * form swigfaiss, which\n# causes a ton of useless warnings.\n\nimport numpy as np\nimport sys\nimport inspect\nimport pdb\nimport platform\nimport subprocess\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef instruction_set():\n if platform.system() == \"Darwin\":\n if subprocess.check_output([\"/usr/sbin/sysctl\", \"hw.optional.avx2_0\"])[-1] == '1':\n return \"AVX2\"\n else:\n return \"default\"\n elif platform.system() == \"Linux\":\n import numpy.distutils.cpuinfo\n if \"avx2\" in numpy.distutils.cpuinfo.cpu.info[0]['flags']:\n return \"AVX2\"\n else:\n return \"default\"\n\n\ntry:\n instr_set = instruction_set()\n if instr_set == \"AVX2\":\n logger.info(\"Loading faiss with AVX2 support.\")\n from .swigfaiss_avx2 import *\n else:\n logger.info(\"Loading faiss.\")\n from .swigfaiss import *\n\nexcept ImportError:\n # we import * so that the symbol X can be accessed as faiss.X\n logger.info(\"Loading faiss.\")\n from .swigfaiss import *\n\n\n__version__ = \"%d.%d.%d\" % (FAISS_VERSION_MAJOR,\n FAISS_VERSION_MINOR,\n FAISS_VERSION_PATCH)\n\n##################################################################\n# The functions below add or replace some methods for classes\n# this is to be able to pass in numpy arrays directly\n# The C++ version of the classnames will be suffixed with _c\n##################################################################\n\n\ndef replace_method(the_class, name, replacement, ignore_missing=False):\n try:\n orig_method = getattr(the_class, name)\n except AttributeError:\n if ignore_missing:\n return\n raise\n if orig_method.__name__ == 'replacement_' + name:\n # replacement was done in parent class\n return\n setattr(the_class, name + '_c', orig_method)\n setattr(the_class, name, 
replacement)\n\n\ndef handle_Clustering():\n def replacement_train(self, x, index):\n assert x.flags.contiguous\n n, d = x.shape\n assert d == self.d\n self.train_c(n, swig_ptr(x), index)\n replace_method(Clustering, 'train', replacement_train)\n\n\nhandle_Clustering()\n\n\ndef handle_Quantizer(the_class):\n\n def replacement_train(self, x):\n n, d = x.shape\n assert d == self.d\n self.train_c(n, swig_ptr(x))\n\n def replacement_compute_codes(self, x):\n n, d = x.shape\n assert d == self.d\n codes = np.empty((n, self.code_size), dtype='uint8')\n self.compute_codes_c(swig_ptr(x), swig_ptr(codes), n)\n return codes\n\n def replacement_decode(self, codes):\n n, cs = codes.shape\n assert cs == self.code_size\n x = np.empty((n, self.d), dtype='float32')\n self.decode_c(swig_ptr(codes), swig_ptr(x), n)\n return x\n\n replace_method(the_class, 'train', replacement_train)\n replace_method(the_class, 'compute_codes', replacement_compute_codes)\n replace_method(the_class, 'decode', replacement_decode)\n\n\nhandle_Quantizer(ProductQuantizer)\nhandle_Quantizer(ScalarQuantizer)\n\n\ndef handle_Index(the_class):\n\n def replacement_add(self, x):\n assert x.flags.contiguous\n n, d = x.shape\n assert d == self.d\n self.add_c(n, swig_ptr(x))\n\n def replacement_add_with_ids(self, x, ids):\n n, d = x.shape\n assert d == self.d\n assert ids.shape == (n, ), 'not same nb of vectors as ids'\n self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))\n\n def replacement_assign(self, x, k):\n n, d = x.shape\n assert d == self.d\n labels = np.empty((n, k), dtype=np.int64)\n self.assign_c(n, swig_ptr(x), swig_ptr(labels), k)\n return labels\n\n def replacement_train(self, x):\n assert x.flags.contiguous\n n, d = x.shape\n assert d == self.d\n self.train_c(n, swig_ptr(x))\n\n def replacement_search(self, x, k):\n n, d = x.shape\n assert d == self.d\n distances = np.empty((n, k), dtype=np.float32)\n labels = np.empty((n, k), dtype=np.int64)\n self.search_c(n, swig_ptr(x),\n k, swig_ptr(distances),\n 
swig_ptr(labels))\n return distances, labels\n\n def replacement_search_and_reconstruct(self, x, k):\n n, d = x.shape\n assert d == self.d\n distances = np.empty((n, k), dtype=np.float32)\n labels = np.empty((n, k), dtype=np.int64)\n recons = np.empty((n, k, d), dtype=np.float32)\n self.search_and_reconstruct_c(n, swig_ptr(x),\n k, swig_ptr(distances),\n swig_ptr(labels),\n swig_ptr(recons))\n return distances, labels, recons\n\n def replacement_remove_ids(self, x):\n if isinstance(x, IDSelector):\n sel = x\n else:\n assert x.ndim == 1\n sel = IDSelectorBatch(x.size, swig_ptr(x))\n return self.remove_ids_c(sel)\n\n def replacement_reconstruct(self, key):\n x = np.empty(self.d, dtype=np.float32)\n self.reconstruct_c(key, swig_ptr(x))\n return x\n\n def replacement_reconstruct_n(self, n0, ni):\n x = np.empty((ni, self.d), dtype=np.float32)\n self.reconstruct_n_c(n0, ni, swig_ptr(x))\n return x\n\n def replacement_update_vectors(self, keys, x):\n n = keys.size\n assert keys.shape == (n, )\n assert x.shape == (n, self.d)\n self.update_vectors_c(n, swig_ptr(keys), swig_ptr(x))\n\n def replacement_range_search(self, x, thresh):\n n, d = x.shape\n assert d == self.d\n res = RangeSearchResult(n)\n self.range_search_c(n, swig_ptr(x), thresh, res)\n # get pointers and copy them\n lims = rev_swig_ptr(res.lims, n + 1).copy()\n nd = int(lims[-1])\n D = rev_swig_ptr(res.distances, nd).copy()\n I = rev_swig_ptr(res.labels, nd).copy()\n return lims, D, I\n\n def replacement_sa_encode(self, x):\n n, d = x.shape\n assert d == self.d\n codes = np.empty((n, self.sa_code_size()), dtype='uint8')\n self.sa_encode_c(n, swig_ptr(x), swig_ptr(codes))\n return codes\n\n def replacement_sa_decode(self, codes):\n n, cs = codes.shape\n assert cs == self.sa_code_size()\n x = np.empty((n, self.d), dtype='float32')\n self.sa_decode_c(n, swig_ptr(codes), swig_ptr(x))\n return x\n\n replace_method(the_class, 'add', replacement_add)\n replace_method(the_class, 'add_with_ids', 
replacement_add_with_ids)\n replace_method(the_class, 'assign', replacement_assign)\n replace_method(the_class, 'train', replacement_train)\n replace_method(the_class, 'search', replacement_search)\n replace_method(the_class, 'remove_ids', replacement_remove_ids)\n replace_method(the_class, 'reconstruct', replacement_reconstruct)\n replace_method(the_class, 'reconstruct_n', replacement_reconstruct_n)\n replace_method(the_class, 'range_search', replacement_range_search)\n replace_method(the_class, 'update_vectors', replacement_update_vectors,\n ignore_missing=True)\n replace_method(the_class, 'search_and_reconstruct',\n replacement_search_and_reconstruct, ignore_missing=True)\n replace_method(the_class, 'sa_encode', replacement_sa_encode)\n replace_method(the_class, 'sa_decode', replacement_sa_decode)\n\ndef handle_IndexBinary(the_class):\n\n def replacement_add(self, x):\n assert x.flags.contiguous\n n, d = x.shape\n assert d * 8 == self.d\n self.add_c(n, swig_ptr(x))\n\n def replacement_add_with_ids(self, x, ids):\n n, d = x.shape\n assert d * 8 == self.d\n assert ids.shape == (n, ), 'not same nb of vectors as ids'\n self.add_with_ids_c(n, swig_ptr(x), swig_ptr(ids))\n\n def replacement_train(self, x):\n assert x.flags.contiguous\n n, d = x.shape\n assert d * 8 == self.d\n self.train_c(n, swig_ptr(x))\n\n def replacement_reconstruct(self, key):\n x = np.empty(self.d // 8, dtype=np.uint8)\n self.reconstruct_c(key, swig_ptr(x))\n return x\n\n def replacement_search(self, x, k):\n n, d = x.shape\n assert d * 8 == self.d\n distances = np.empty((n, k), dtype=np.int32)\n labels = np.empty((n, k), dtype=np.int64)\n self.search_c(n, swig_ptr(x),\n k, swig_ptr(distances),\n swig_ptr(labels))\n return distances, labels\n\n def replacement_remove_ids(self, x):\n if isinstance(x, IDSelector):\n sel = x\n else:\n assert x.ndim == 1\n sel = IDSelectorBatch(x.size, swig_ptr(x))\n return self.remove_ids_c(sel)\n\n replace_method(the_class, 'add', replacement_add)\n 
replace_method(the_class, 'add_with_ids', replacement_add_with_ids)\n replace_method(the_class, 'train', replacement_train)\n replace_method(the_class, 'search', replacement_search)\n replace_method(the_class, 'reconstruct', replacement_reconstruct)\n replace_method(the_class, 'remove_ids', replacement_remove_ids)\n\n\ndef handle_VectorTransform(the_class):\n\n def apply_method(self, x):\n assert x.flags.contiguous\n n, d = x.shape\n assert d == self.d_in\n y = np.empty((n, self.d_out), dtype=np.float32)\n self.apply_noalloc(n, swig_ptr(x), swig_ptr(y))\n return y\n\n def replacement_reverse_transform(self, x):\n n, d = x.shape\n assert d == self.d_out\n y = np.empty((n, self.d_in), dtype=np.float32)\n self.reverse_transform_c(n, swig_ptr(x), swig_ptr(y))\n return y\n\n def replacement_vt_train(self, x):\n assert x.flags.contiguous\n n, d = x.shape\n assert d == self.d_in\n self.train_c(n, swig_ptr(x))\n\n replace_method(the_class, 'train', replacement_vt_train)\n # apply is reserved in Pyton...\n the_class.apply_py = apply_method\n replace_method(the_class, 'reverse_transform',\n replacement_reverse_transform)\n\n\ndef handle_AutoTuneCriterion(the_class):\n def replacement_set_groundtruth(self, D, I):\n if D:\n assert I.shape == D.shape\n self.nq, self.gt_nnn = I.shape\n self.set_groundtruth_c(\n self.gt_nnn, swig_ptr(D) if D else None, swig_ptr(I))\n\n def replacement_evaluate(self, D, I):\n assert I.shape == D.shape\n assert I.shape == (self.nq, self.nnn)\n return self.evaluate_c(swig_ptr(D), swig_ptr(I))\n\n replace_method(the_class, 'set_groundtruth', replacement_set_groundtruth)\n replace_method(the_class, 'evaluate', replacement_evaluate)\n\n\ndef handle_ParameterSpace(the_class):\n def replacement_explore(self, index, xq, crit):\n assert xq.shape == (crit.nq, index.d)\n ops = OperatingPoints()\n self.explore_c(index, crit.nq, swig_ptr(xq),\n crit, ops)\n return ops\n replace_method(the_class, 'explore', replacement_explore)\n\n\ndef 
handle_MatrixStats(the_class):\n original_init = the_class.__init__\n\n def replacement_init(self, m):\n assert len(m.shape) == 2\n original_init(self, m.shape[0], m.shape[1], swig_ptr(m))\n\n the_class.__init__ = replacement_init\n\nhandle_MatrixStats(MatrixStats)\n\n\nthis_module = sys.modules[__name__]\n\n\nfor symbol in dir(this_module):\n obj = getattr(this_module, symbol)\n # print symbol, isinstance(obj, (type, types.ClassType))\n if inspect.isclass(obj):\n the_class = obj\n if issubclass(the_class, Index):\n handle_Index(the_class)\n\n if issubclass(the_class, IndexBinary):\n handle_IndexBinary(the_class)\n\n if issubclass(the_class, VectorTransform):\n handle_VectorTransform(the_class)\n\n if issubclass(the_class, AutoTuneCriterion):\n handle_AutoTuneCriterion(the_class)\n\n if issubclass(the_class, ParameterSpace):\n handle_ParameterSpace(the_class)\n\n\n###########################################\n# Add Python references to objects\n# we do this at the Python class wrapper level.\n###########################################\n\ndef add_ref_in_constructor(the_class, parameter_no):\n # adds a reference to parameter parameter_no in self\n # so that that parameter does not get deallocated before self\n original_init = the_class.__init__\n\n def replacement_init(self, *args):\n original_init(self, *args)\n self.referenced_objects = [args[parameter_no]]\n\n def replacement_init_multiple(self, *args):\n original_init(self, *args)\n pset = parameter_no[len(args)]\n self.referenced_objects = [args[no] for no in pset]\n\n if type(parameter_no) == dict:\n # a list of parameters to keep, depending on the number of arguments\n the_class.__init__ = replacement_init_multiple\n else:\n the_class.__init__ = replacement_init\n\ndef add_ref_in_method(the_class, method_name, parameter_no):\n original_method = getattr(the_class, method_name)\n def replacement_method(self, *args):\n ref = args[parameter_no]\n if not hasattr(self, 'referenced_objects'):\n 
self.referenced_objects = [ref]\n else:\n self.referenced_objects.append(ref)\n return original_method(self, *args)\n setattr(the_class, method_name, replacement_method)\n\ndef add_ref_in_function(function_name, parameter_no):\n # assumes the function returns an object\n original_function = getattr(this_module, function_name)\n def replacement_function(*args):\n result = original_function(*args)\n ref = args[parameter_no]\n result.referenced_objects = [ref]\n return result\n setattr(this_module, function_name, replacement_function)\n\nadd_ref_in_constructor(IndexIVFFlat, 0)\nadd_ref_in_constructor(IndexIVFFlatDedup, 0)\nadd_ref_in_constructor(IndexPreTransform, {2: [0, 1], 1: [0]})\nadd_ref_in_method(IndexPreTransform, 'prepend_transform', 0)\nadd_ref_in_constructor(IndexIVFPQ, 0)\nadd_ref_in_constructor(IndexIVFPQR, 0)\nadd_ref_in_constructor(Index2Layer, 0)\nadd_ref_in_constructor(Level1Quantizer, 0)\nadd_ref_in_constructor(IndexIVFScalarQuantizer, 0)\nadd_ref_in_constructor(IndexIDMap, 0)\nadd_ref_in_constructor(IndexIDMap2, 0)\nadd_ref_in_constructor(IndexHNSW, 0)\nadd_ref_in_method(IndexShards, 'add_shard', 0)\nadd_ref_in_method(IndexBinaryShards, 'add_shard', 0)\nadd_ref_in_constructor(IndexRefineFlat, 0)\nadd_ref_in_constructor(IndexBinaryIVF, 0)\nadd_ref_in_constructor(IndexBinaryFromFloat, 0)\nadd_ref_in_constructor(IndexBinaryIDMap, 0)\nadd_ref_in_constructor(IndexBinaryIDMap2, 0)\n\nadd_ref_in_method(IndexReplicas, 'addIndex', 0)\nadd_ref_in_method(IndexBinaryReplicas, 'addIndex', 0)\n\n# seems really marginal...\n# remove_ref_from_method(IndexReplicas, 'removeIndex', 0)\n\nif hasattr(this_module, 'GpuIndexFlat'):\n # handle all the GPUResources refs\n add_ref_in_function('index_cpu_to_gpu', 0)\n add_ref_in_constructor(GpuIndexFlat, 0)\n add_ref_in_constructor(GpuIndexFlatIP, 0)\n add_ref_in_constructor(GpuIndexFlatL2, 0)\n add_ref_in_constructor(GpuIndexIVFFlat, 0)\n add_ref_in_constructor(GpuIndexIVFScalarQuantizer, 0)\n 
add_ref_in_constructor(GpuIndexIVFPQ, 0)\n add_ref_in_constructor(GpuIndexBinaryFlat, 0)\n\n\n\n###########################################\n# GPU functions\n###########################################\n\n\ndef index_cpu_to_gpu_multiple_py(resources, index, co=None, gpus=None):\n \"\"\" builds the C++ vectors for the GPU indices and the\n resources. Handles the case where the resources are assigned to\n the list of GPUs \"\"\"\n if gpus is None:\n gpus = range(len(resources))\n vres = GpuResourcesVector()\n vdev = IntVector()\n for i, res in zip(gpus, resources):\n vdev.push_back(i)\n vres.push_back(res)\n index = index_cpu_to_gpu_multiple(vres, vdev, index, co)\n index.referenced_objects = resources\n return index\n\n\ndef index_cpu_to_all_gpus(index, co=None, ngpu=-1):\n index_gpu = index_cpu_to_gpus_list(index, co=co, gpus=None, ngpu=ngpu)\n return index_gpu\n\n\ndef index_cpu_to_gpus_list(index, co=None, gpus=None, ngpu=-1):\n \"\"\" Here we can pass list of GPU ids as a parameter or ngpu to\n use first n GPU's. 
gpus mut be a list or None\"\"\"\n if (gpus is None) and (ngpu == -1): # All blank\n gpus = range(get_num_gpus())\n elif (gpus is None) and (ngpu != -1): # Get number of GPU's only\n gpus = range(ngpu)\n res = [StandardGpuResources() for _ in gpus]\n index_gpu = index_cpu_to_gpu_multiple_py(res, index, co, gpus)\n return index_gpu\n\n\n###########################################\n# numpy array / std::vector conversions\n###########################################\n\n# mapping from vector names in swigfaiss.swig and the numpy dtype names\nvector_name_map = {\n 'Float': 'float32',\n 'Byte': 'uint8',\n 'Char': 'int8',\n 'Uint64': 'uint64',\n 'Long': 'int64',\n 'Int': 'int32',\n 'Double': 'float64'\n }\n\ndef vector_to_array(v):\n \"\"\" convert a C++ vector to a numpy array \"\"\"\n classname = v.__class__.__name__\n assert classname.endswith('Vector')\n dtype = np.dtype(vector_name_map[classname[:-6]])\n a = np.empty(v.size(), dtype=dtype)\n if v.size() > 0:\n memcpy(swig_ptr(a), v.data(), a.nbytes)\n return a\n\n\ndef vector_float_to_array(v):\n return vector_to_array(v)\n\n\ndef copy_array_to_vector(a, v):\n \"\"\" copy a numpy array to a vector \"\"\"\n n, = a.shape\n classname = v.__class__.__name__\n assert classname.endswith('Vector')\n dtype = np.dtype(vector_name_map[classname[:-6]])\n assert dtype == a.dtype, (\n 'cannot copy a %s array to a %s (should be %s)' % (\n a.dtype, classname, dtype))\n v.resize(n)\n if n > 0:\n memcpy(v.data(), swig_ptr(a), a.nbytes)\n\n\n###########################################\n# Wrapper for a few functions\n###########################################\n\ndef kmin(array, k):\n \"\"\"return k smallest values (and their indices) of the lines of a\n float32 array\"\"\"\n m, n = array.shape\n I = np.zeros((m, k), dtype='int64')\n D = np.zeros((m, k), dtype='float32')\n ha = float_maxheap_array_t()\n ha.ids = swig_ptr(I)\n ha.val = swig_ptr(D)\n ha.nh = m\n ha.k = k\n ha.heapify()\n ha.addn(n, swig_ptr(array))\n ha.reorder()\n 
return D, I\n\n\ndef kmax(array, k):\n \"\"\"return k largest values (and their indices) of the lines of a\n float32 array\"\"\"\n m, n = array.shape\n I = np.zeros((m, k), dtype='int64')\n D = np.zeros((m, k), dtype='float32')\n ha = float_minheap_array_t()\n ha.ids = swig_ptr(I)\n ha.val = swig_ptr(D)\n ha.nh = m\n ha.k = k\n ha.heapify()\n ha.addn(n, swig_ptr(array))\n ha.reorder()\n return D, I\n\n\ndef pairwise_distances(xq, xb, mt=METRIC_L2, metric_arg=0):\n \"\"\"compute the whole pairwise distance matrix between two sets of\n vectors\"\"\"\n nq, d = xq.shape\n nb, d2 = xb.shape\n assert d == d2\n dis = np.empty((nq, nb), dtype='float32')\n if mt == METRIC_L2:\n pairwise_L2sqr(\n d, nq, swig_ptr(xq),\n nb, swig_ptr(xb),\n swig_ptr(dis))\n else:\n pairwise_extra_distances(\n d, nq, swig_ptr(xq),\n nb, swig_ptr(xb),\n mt, metric_arg,\n swig_ptr(dis))\n return dis\n\n\n\n\ndef rand(n, seed=12345):\n res = np.empty(n, dtype='float32')\n float_rand(swig_ptr(res), res.size, seed)\n return res\n\n\ndef randint(n, seed=12345, vmax=None):\n res = np.empty(n, dtype='int64')\n if vmax is None:\n int64_rand(swig_ptr(res), res.size, seed)\n else:\n int64_rand_max(swig_ptr(res), res.size, vmax, seed)\n return res\n\nlrand = randint\n\ndef randn(n, seed=12345):\n res = np.empty(n, dtype='float32')\n float_randn(swig_ptr(res), res.size, seed)\n return res\n\n\ndef eval_intersection(I1, I2):\n \"\"\" size of intersection between each line of two result tables\"\"\"\n n = I1.shape[0]\n assert I2.shape[0] == n\n k1, k2 = I1.shape[1], I2.shape[1]\n ninter = 0\n for i in range(n):\n ninter += ranklist_intersection_size(\n k1, swig_ptr(I1[i]), k2, swig_ptr(I2[i]))\n return ninter\n\n\ndef normalize_L2(x):\n fvec_renorm_L2(x.shape[1], x.shape[0], swig_ptr(x))\n\n# MapLong2Long interface\n\ndef replacement_map_add(self, keys, vals):\n n, = keys.shape\n assert (n,) == keys.shape\n self.add_c(n, swig_ptr(keys), swig_ptr(vals))\n\ndef replacement_map_search_multiple(self, keys):\n n, 
= keys.shape\n vals = np.empty(n, dtype='int64')\n self.search_multiple_c(n, swig_ptr(keys), swig_ptr(vals))\n return vals\n\nreplace_method(MapLong2Long, 'add', replacement_map_add)\nreplace_method(MapLong2Long, 'search_multiple', replacement_map_search_multiple)\n\n\n###########################################\n# Kmeans object\n###########################################\n\n\nclass Kmeans:\n \"\"\"shallow wrapper around the Clustering object. The important method\n is train().\"\"\"\n\n def __init__(self, d, k, **kwargs):\n \"\"\"d: input dimension, k: nb of centroids. Additional\n parameters are passed on the ClusteringParameters object,\n including niter=25, verbose=False, spherical = False\n \"\"\"\n self.d = d\n self.k = k\n self.gpu = False\n self.cp = ClusteringParameters()\n for k, v in kwargs.items():\n if k == 'gpu':\n self.gpu = v\n else:\n # if this raises an exception, it means that it is a non-existent field\n getattr(self.cp, k)\n setattr(self.cp, k, v)\n self.centroids = None\n\n def train(self, x):\n n, d = x.shape\n assert d == self.d\n clus = Clustering(d, self.k, self.cp)\n if self.cp.spherical:\n self.index = IndexFlatIP(d)\n else:\n self.index = IndexFlatL2(d)\n if self.gpu:\n if self.gpu == True:\n ngpu = -1\n else:\n ngpu = self.gpu\n self.index = index_cpu_to_all_gpus(self.index, ngpu=ngpu)\n clus.train(x, self.index)\n centroids = vector_float_to_array(clus.centroids)\n self.centroids = centroids.reshape(self.k, d)\n self.obj = vector_float_to_array(clus.obj)\n return self.obj[-1] if self.obj.size > 0 else 0.0\n\n def assign(self, x):\n assert self.centroids is not None, \"should train before assigning\"\n self.index.reset()\n self.index.add(self.centroids)\n D, I = self.index.search(x, 1)\n return D.ravel(), I.ravel()\n\n# IndexProxy was renamed to IndexReplicas, remap the old name for any old code\n# people may have\nIndexProxy = IndexReplicas\nConcatenatedInvertedLists = 
HStackInvertedLists\n\n###########################################\n# serialization of indexes to byte arrays\n###########################################\n\ndef serialize_index(index):\n \"\"\" convert an index to a numpy uint8 array \"\"\"\n writer = VectorIOWriter()\n write_index(index, writer)\n return vector_to_array(writer.data)\n\ndef deserialize_index(data):\n reader = VectorIOReader()\n copy_array_to_vector(data, reader.data)\n return read_index(reader)\n"
] | [
[
"numpy.empty",
"numpy.zeros",
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stegua/dotlib | [
"754d93f16522714668e99a3c313a2acdc2cd0bd1"
] | [
"python/wd_dual_simplex.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 4 10:45:47 2017\n\n@author: gualandi\n\"\"\"\n\nimport numpy as np\nfrom gurobipy import Model, GRB, quicksum, tuplelist\n\ndef ComputeDistanceMatrix(n, p=2, q=1):\n \"\"\" Compute the ground distance with power p of an n*n image \"\"\"\n def m(x,y):\n return x*s+y\n \n s = int(np.sqrt(n))\n C = np.zeros((n, n))\n for i in range(s):\n for j in range(s):\n for v in range(s):\n for w in range(s):\n C[m(i,j)][m(v,w)] = pow(pow(abs(i - v)**p + abs(j - w)**p, 1/p), q)\n return C\n\n\ndef Preprocessing(h1, h2):\n assert(h1.size == h2.size)\n n = len(h1)\n for i in range(n):\n offset = min(h1[i], h2[i])\n h1[i] -= offset\n h2[i] -= offset \n \n return h1, h2\n \n \ndef WassersteinDualSimplex(h1, h2, M):\n \"\"\" Find the Wasserstein distance using the dual simplex \"\"\"\n n = len(h1)\n\n # Build model\n m = Model()\n m.setParam(GRB.Param.NumericFocus, 3)\n #m.setParam(GRB.Param.TimeLimit, 300)\n #m.setParam(GRB.Param.Presolve, 0) \n #m.setParam(GRB.Param.Threads, 1)\n # Options are: \n # -1=automatic, 0=primal simplex, 1=dual simplex, 2=barrier, \n # 3=concurrent, 4=deterministic concurrent.\n #m.setParam(GRB.Param.Method, 0)\n print('1. Start building model')\n # Create variables\n x = {} \n P = set()\n D = []\n for i in range(n):\n if h1[i] > 0:\n x[i] = {}\n for j in range(n):\n if h2[j] > 0:\n x[i][j] = m.addVar(ub=min(h1[i], h2[j]), obj=M[i][j])\n D.append((i,j))\n P.add(j)\n D = tuplelist(D)\n m.update()\n print('2. Add initial constraint sets')\n for i in x:\n m.addConstr(quicksum(x[i][j] for j in x[i]) <= h1[i])\n \n for j in P:\n m.addConstr(quicksum(x[i][j] for i,j in D.select('*',j)) >= h2[j])\n print('3. 
Start solution phase')\n # Solve the model\n m.optimize()\n \n return m.getAttr(GRB.Attr.ObjVal)\n\n#------------------------------------------\n# MAIN ENTRY POINT\n#------------------------------------------\nif __name__ == \"__main__\":\n filename1 = 'D:\\Ricerca\\DOTA\\data\\DOTmark_1.0\\Data\\ClassicImages\\data32_1005.csv'\n M1 = np.loadtxt(open(filename1, \"rb\"), delimiter=\",\")\n M1 = np.array(M1.flatten())\n\n filename2 = 'D:\\Ricerca\\DOTA\\data\\DOTmark_1.0\\Data\\ClassicImages\\data32_1009.csv'\n M2 = np.loadtxt(open(filename2, \"rb\"), delimiter=\",\")\n M2 = np.array(M2.flatten())\n\n #M1,M2 = Preprocessing(M1,M2)\n\n\n C = ComputeDistanceMatrix(len(M1), p=2, q=1)\n print(WassersteinDualSimplex(M1, M2, C))"
] | [
[
"numpy.zeros",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
alvaro-budria/body2hands | [
"8ab4b206dc397c3b326f2b4ec9448c84ee8801fe"
] | [
"utils/load_utils.py"
] | [
"# Copyright (c) Facebook, Inc. and its affiliates.\nimport json\nimport numpy as np\nimport os, sys\nimport scipy\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d, Axes3D\nfrom scipy.spatial.transform import Rotation as R\n\nfrom shutil import copyfile\nfrom PIL import Image,ImageDraw\nfrom torchvision import transforms\nimport torch\n\n\nFEATURE_MAP = {\n 'arm2wh':((6*6), 42*6),\n}\n\nARMS_ONLY = [12,13,14,15,16,17] #arms for smpl\nEPSILON = 1e-10\n\n## helper for calculating mean and standard dev\ndef mean_std(feat, data, rot_idx):\n if feat == 'wh':\n mean = data.mean(axis=2).mean(axis=0)[np.newaxis,:, np.newaxis]\n std = data.std(axis=2).std(axis=0)[np.newaxis,:, np.newaxis]\n std += EPSILON\n else:\n mean = data.mean(axis=2).mean(axis=0)[np.newaxis,:, np.newaxis]\n std = np.array([[[data.std()]]]).repeat(data.shape[1], axis=1)\n return mean, std\n\n\n## helper for calculating standardization stats\ndef calc_standard(train_X, train_Y, pipeline):\n rot_idx = -6\n feats = pipeline.split('2')\n in_feat, out_feat = feats[0], feats[1]\n body_mean_X, body_std_X = mean_std(in_feat, train_X, rot_idx)\n if in_feat == out_feat:\n body_mean_Y = body_mean_X\n body_std_Y = body_std_X\n else:\n body_mean_Y, body_std_Y = mean_std(out_feat, train_Y, rot_idx)\n return body_mean_X, body_std_X, body_mean_Y, body_std_Y\n\n\n## utility check if object is float\ndef is_float(n):\n try:\n float(n)\n return True\n except:\n return False\n\n\n## utility function to convert from r6d space to axis angle\ndef rot6d_to_aa(r6ds):\n res = np.zeros((r6ds.shape[0], 3))\n for i,row in enumerate(r6ds):\n np_r6d = np.expand_dims(row, axis=0)\n np_mat = np.reshape(np_rot6d_to_mat(np_r6d)[0], (3,3))\n np_mat = R.from_matrix(np_mat)\n aa = np_mat.as_rotvec()\n res[i,:] = aa\n return res\n\n\ndef np_mat_to_rot6d(np_mat):\n \"\"\" Get 6D rotation representation for rotation matrix.\n Implementation base on\n https://arxiv.org/abs/1812.07035\n [Inputs]\n flattened rotation 
matrix (last dimension is 9)\n [Returns]\n 6D rotation representation (last dimension is 6)\n \"\"\"\n shape = np_mat.shape\n\n if not ((shape[-1] == 3 and shape[-2] == 3) or (shape[-1] == 9)):\n raise AttributeError(\"The inputs in tf_matrix_to_rotation6d should be [...,9] or [...,3,3], \\\n but found tensor with shape {}\".format(shape[-1]))\n\n np_mat = np.reshape(np_mat, [-1, 3, 3])\n np_r6d = np.concatenate([np_mat[...,0], np_mat[...,1]], axis=-1)\n\n if len(shape) == 1:\n np_r6d = np.reshape(np_r6d, [6])\n\n return np_r6d\n\n\n## utility function to convert from axis angle to r6d space\ndef aa_to_rot6d(vecs):\n res = np.zeros((vecs.shape[0], 6))\n for i,row in enumerate(vecs):\n np_mat = R.from_rotvec(row)\n np_mat = np_mat.as_dcm()\n np_mat = np.expand_dims(np_mat, axis=0) #e.g. batch 1\n np_r6d = np_mat_to_rot6d(np_mat)[0]\n res[i,:] = np_r6d\n return res\n\n\n## utility function to convert from r6d space to rotation matrix\ndef np_rot6d_to_mat(np_r6d):\n shape = np_r6d.shape\n np_r6d = np.reshape(np_r6d, [-1,6])\n x_raw = np_r6d[:,0:3]\n y_raw = np_r6d[:,3:6]\n\n x = x_raw / np.linalg.norm(x_raw, ord=2, axis=-1)\n z = np.cross(x, y_raw)\n z = z / np.linalg.norm(z, ord=2, axis=-1)\n y = np.cross(z, x)\n\n x = np.reshape(x, [-1,3,1])\n y = np.reshape(y, [-1,3,1])\n z = np.reshape(z, [-1,3,1])\n np_matrix = np.concatenate([x,y,z], axis=-1)\n\n if len(shape) == 1:\n np_matrix = np.reshape(np_matrix, [9])\n else:\n output_shape = shape[:-1] + (9,)\n np_matrix = np.reshape(np_matrix, output_shape)\n\n return np_matrix\n\n\n## utility to load windows from outside files\ndef load_windows(data_dir, pipeline, num_samples=None, use_euler=False, require_image=False, require_audio=False, hand3d_image=False, use_lazy=False, test_smpl=False, temporal=False):\n preload_path = os.path.join(data_dir, 'filepaths.npy')\n if os.path.exists(preload_path):\n filepaths = np.load(preload_path, allow_pickle=True)\n feats = pipeline.split('2')\n in_feat, out_feat = feats[0], 
feats[1]\n p0_size, p1_size = FEATURE_MAP[pipeline]\n\n if os.path.exists(os.path.join(data_dir, 'full_bodies2.npy')):\n print('using super quick load', data_dir)\n p1_windows = np.load(os.path.join(data_dir, 'full_hands2.npy'), allow_pickle=True)\n p0_windows = np.load(os.path.join(data_dir, 'full_bodies2.npy'), allow_pickle=True)\n B,T = p0_windows.shape[0], p0_windows.shape[1]\n if in_feat == 'arm':\n p0_windows = np.reshape(p0_windows, (B,T,-1,6))\n p0_windows = p0_windows[:,:,ARMS_ONLY,:]\n p0_windows = np.reshape(p0_windows, (B,T,-1))\n if require_image:\n image_windows = np.load(os.path.join(data_dir, 'full_resnet.npy'), allow_pickle=True)\n\n if require_image:\n p0_windows = (p0_windows, image_windows)\n\n return p0_windows, p1_windows, filepaths, None\n\n\n## utility to save results\ndef save_results(paths, output, pipeline, base_path, tag=''):\n feats = pipeline.split('2')\n out_feat = feats[1]\n paths = np.array(paths)\n\n for i in range(paths.shape[0]):\n print('working on', paths[i,0,0])\n for j in range(paths.shape[1]):\n vid_path, pnum, frame_idx = paths[i][j]\n vid_path = os.path.join(base_path, vid_path)\n if not os.path.exists(os.path.join(vid_path, 'results/')):\n os.makedirs(os.path.join(vid_path, 'results/'))\n\n if out_feat == 'wh':\n pred_dir = os.path.join(vid_path, 'results/{}predicted_body_3d_frontal/'.format(tag))\n if not os.path.exists(pred_dir):\n os.makedirs(pred_dir)\n pred_path = os.path.join(pred_dir, '{:04d}.txt'.format(int(frame_idx)))\n\n ## set the ground truth estimated full body pose parameters for viewing\n gt_path = os.path.join(vid_path, 'body_3d_frontal/{:04d}.txt'.format(int(frame_idx)))\n with open(gt_path) as f:\n lines = f.readlines()\n cam = lines[0]\n cam = [float(n) for n in cam.split(' ') if is_float(n)]\n pose = lines[1]\n pose = [float(n) for n in pose.split(' ') if is_float(n)]\n shape = lines[2]\n shape = [float(n) for n in shape.split(' ') if is_float(n)]\n idk = lines[3]\n idk = [float(n) for n in 
idk.split(' ') if is_float(n)]\n ## DONE set the ground truth estimated full body pose parameters for viewing\n\n\n ## fill in the predicted hands to the full body pose\n pose = np.reshape(pose, (62,3))\n if out_feat == 'wh':\n hands_r6d = np.reshape(output[i][j],(42,6))\n hands = rot6d_to_aa(hands_r6d)\n pose[-42:,:] = hands\n pose = np.reshape(pose, (-1))\n ## DONE fill in the predicted hands to the full body pose\n\n\n ## writing prediciton to file\n with open(pred_path, 'w') as f:\n for item in cam:\n f.write(\"%s \"%item)\n f.write(\"\\n\")\n for item in pose:\n f.write(\"%s \"%item)\n f.write(\"\\n\")\n for item in shape:\n f.write(\"%s \"%item)\n f.write(\"\\n\")\n for item in idk:\n f.write(\"%s \"%item)\n ## DONE writing prediciton to file\n"
] | [
[
"scipy.spatial.transform.Rotation.from_rotvec",
"numpy.expand_dims",
"numpy.reshape",
"scipy.spatial.transform.Rotation.from_matrix",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.cross",
"numpy.load",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.5",
"1.4"
],
"tensorflow": []
}
] |
Wookhwang/Election_Predictions_Project | [
"d550e58810df192ca1ec7d08eb817afb10d63aab"
] | [
"Morpheme/KoNLPy_DataFrame.py"
] | [
"import pandas as pd\r\nimport numpy as np\r\nfrom konlpy.tag import Okt\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom tqdm import tqdm\r\n\r\n'''\r\nif __name__ == '__main__':\r\n # 각 형태소별로 분류(Tagging)해주는 Okt 객체를 불러온다.\r\n tagger = Okt()\r\n\r\n # nouns 함수를 통해 명사에 해당하는 부분만 리스트 형태로 반환한다.\r\n noun_list = tagger.nouns('KoNLPy에 오신 것을 환영합니다.')\r\n\r\n print(noun_list)\r\n\r\n'''\r\n# DTM을 편리하게 만들어주기 위해 Scikit-Learn에서 제공하는 CountVectorizer를 import 한다.\r\n\r\n\r\nif __name__ == '__main__':\r\n # 타이틀 리스트를 불러와서 title_list 변수에 저장한다.\r\n t_file_name = open('C:/Users/khw08/Desktop/OSBAD_Project/Morphene/Title_List.txt', 'r', encoding='utf-8')\r\n\r\n title_list = []\r\n for line in t_file_name.readlines():\r\n # txt파일을 readlines로 불러오면 개행 문자도 함께 읽어오기 때문에 인덱싱으로 처리해준다.\r\n title_list.append(line[:-1])\r\n\r\n t_file_name.close()\r\n\r\n s_file_name = open('C:/Users/khw08/Desktop/OSBAD_Project/Morphene/Stop_Words.txt', 'r', encoding='utf-8')\r\n\r\n stop_words_list = []\r\n for line in s_file_name.readlines():\r\n # 위에서는 인덱싱으로 처리했지만 rstrip 함수를 사용하여 공백을 제거할 수도 있다.\r\n stop_words_list.append(line.rstrip())\r\n\r\n s_file_name.close()\r\n # print(stop_words_list)\r\n\r\n # pandas의 read_csv 함수를 이용하여 csv 파일을 불러온다.\r\n dataset = pd.read_csv('C:/Users/khw08/Desktop/OSBAD_Project/Morphene/M&D_train_CSV.csv', encoding='utf-8')\r\n\r\n\r\n # 각 형태소별로 분류(Tagging)해주는 Okt 객체를 불러온다.\r\n tagger = Okt()\r\n\r\n for title in tqdm(title_list, desc='타이틀 리스트 진행도'): # title_list에 대해 반복문을 실행\r\n # 각 타이틀에 대한 6770개 문서의 DTM을 표현하기 위해\r\n # CountVectorizer 객체를 선언\r\n cv = CountVectorizer()\r\n\r\n # 각 문서들의 말뭉치(corpus)를 저장할 리스트 선언\r\n corpus = []\r\n\r\n # 각 타이틀에 대한 문서들의 말 뭉치를 저장한다. 
(데이터가 많으면 이 부분에서 장시간이 소요될 수 있다.)\r\n for doc_num in tqdm(range(len(dataset)), desc='문서 진행도'):\r\n # 각 말뭉치에서 명사 리스트를 만든다.\r\n noun_list = tagger.nouns(dataset[title].loc[doc_num])\r\n\r\n # 이를 문자열로 저장해야하기 때문에 join함수로 공백으로 구분해 corpus에 append한다.\r\n corpus.append(' '.join(noun_list))\r\n\r\n # CountVectorizer의 fit_transform 함수를 통해 DTM을 한번에 생성할 수 있다.\r\n DTM_Array = cv.fit_transform(corpus).toarray()\r\n\r\n # feature_names 함수를 사용하면 DTM의 각 열(column)이 어떤 단어에 해당하는지 알 수 있다.\r\n feature_names = cv.get_feature_names()\r\n\r\n # 추출해낸 데이터를 DataFrame 형식으로 변환한다.\r\n DTM_DataFrmae = pd.DataFrame(DTM_Array, columns=feature_names)\r\n\r\n # 열 제거는 drop 함수를 사용하며 axis 속성을 columns로 준 후 inplace를 True로 한다.\r\n DTM_DataFrmae.drop(stop_words_list, axis='columns', inplace=True)\r\n\r\n # 최종적으로 DTM을 csv 파일로 저장한다.\r\n DTM_DataFrmae.to_csv('C:/Users/khw08/Desktop/OSBAD_Project/Morphene/KoNLPY_Youtube_M&D_train_CSV_4.csv', encoding='utf-8-sig')\r\n\r\n"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Kabongosalomon/task-dataset-metric-nli-extraction | [
"2f7ecd7e1e4a456d2e23d9384f11c453653c4351"
] | [
"tdm_style_one_to_two.py"
] | [
"# Imports \nimport os\nimport json\nimport argparse\nimport time\nimport ipdb\nimport pickle\nimport logging\nfrom shutil import copyfile, rmtree, copytree\nfrom filecmp import dircmp\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nimport numpy as np\nfrom tqdm import tqdm\nfrom collections import deque\n\n# # Internal inport \nfrom utils.helpers import get_start_lenght\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"Run TDM\")\n parser.add_argument(\"-psource\", \"--path_source_folder\", \n default=None, \n help=\"Path to source folder to be cloned in the format of folder clone\")\n parser.add_argument(\"-ptgtclone\", \"--path_target_clone\", \n default=None, \n help=\"Path to the folder to be cloned with a different context from source\")\n \n args = parser.parse_args()\n\n source_path = args.path_source_folder\n target_path = args.path_target_clone\n\n start_time = time.time()\n\n if os.path.exists(f\"{target_path}twofoldwithunk/\") :\n # https://stackoverflow.com/questions/48892772/how-to-remove-a-directory-is-os-removedirs-and-os-rmdir-only-used-to-delete-emp\n rmtree(f\"{target_path}twofoldwithunk/\")\n \n \n copytree(f\"{source_path}twofoldwithunk/\", f\"{target_path}twofoldwithunk/\")\n # !cp ${source_path}/twofoldwithunk ${target_path}\n\n train_target_f1_pd = pd.read_csv(f\"{target_path}twofoldwithunk/fold1/train.tsv\", \n sep=\"\\t\", names=[\"label\", \"title\", \"TDM\", \"Context\"])\n\n dev_target_f1_pd = pd.read_csv(f\"{target_path}twofoldwithunk/fold1/dev.tsv\", \n sep=\"\\t\", names=[\"label\", \"title\", \"TDM\", \"Context\"])\n\n train_target_f2_pd = pd.read_csv(f\"{target_path}twofoldwithunk/fold2/train.tsv\", \n sep=\"\\t\", names=[\"label\", \"title\", \"TDM\", \"Context\"])\n\n dev_target_f2_pd = pd.read_csv(f\"{target_path}twofoldwithunk/fold2/dev.tsv\", \n sep=\"\\t\", names=[\"label\", \"title\", \"TDM\", \"Context\"])\n\n trainOutput_source_pd = 
pd.read_csv(f\"{source_path}trainOutput.tsv\", \n sep=\"\\t\", names=[\"label\", \"title\", \"TDM\", \"Context\"])\n\n trainOutput_target_pd = pd.read_csv(f\"{target_path}trainOutput.tsv\", \n sep=\"\\t\", names=[\"label\", \"title\", \"TDM\", \"Context\"])\n\n\n list_trainOutput_source_pd_uniq = list(trainOutput_source_pd.title.unique())\n\n list_trainOutput_target_pd_uniq = list(trainOutput_target_pd.title.unique())\n\n dict_source_paper_context = {}\n for paper in list_trainOutput_source_pd_uniq:\n dict_source_paper_context[paper]=trainOutput_source_pd[trainOutput_source_pd.title==paper].Context.values[0]\n \n dict_target_paper_context = {}\n for paper in list_trainOutput_target_pd_uniq:\n dict_target_paper_context[paper]=trainOutput_target_pd[trainOutput_target_pd.title==paper].Context.values[0]\n\n print(\"---------------------------------------------\")\n\n len_context_source = get_start_lenght(dict_source_paper_context, \n limit=\"Source\", \n title=\"Source\")\n\n len_context_target = get_start_lenght(dict_target_paper_context, \n limit=\"Target\",\n title=\"Target\")\n\n train_target_f1_pd[\"Context\"] = train_target_f1_pd.apply(lambda x : dict_target_paper_context[x['title']] if x['title'] in dict_target_paper_context.keys() else x['title'], axis=1)\n dev_target_f1_pd[\"Context\"] = dev_target_f1_pd.apply(lambda x : dict_target_paper_context[x['title']] if x['title'] in dict_target_paper_context.keys() else x['title'], axis=1)\n\n train_target_f2_pd[\"Context\"] = train_target_f2_pd.apply(lambda x : dict_target_paper_context[x['title']] if x['title'] in dict_target_paper_context.keys() else x['title'], axis=1)\n dev_target_f2_pd[\"Context\"] = dev_target_f2_pd.apply(lambda x : dict_target_paper_context[x['title']] if x['title'] in dict_target_paper_context.keys() else x['title'], axis=1)\n\n trainOutput_target_pd[\"Context\"] = trainOutput_target_pd.apply(lambda x : dict_target_paper_context[x['title']] if x['title'] in dict_target_paper_context.keys() 
else x['title'], axis=1)\n\n\n train_target_f1_pd.to_csv(path_or_buf=f\"{target_path}twofoldwithunk/fold1/train.tsv\", \n sep=\"\\t\", header=None, index=False)\n dev_target_f1_pd.to_csv(path_or_buf=f\"{target_path}twofoldwithunk/fold1/dev.tsv\", \n sep=\"\\t\", header=None, index=False)\n\n train_target_f2_pd.to_csv(path_or_buf=f\"{target_path}twofoldwithunk/fold2/train.tsv\", \n sep=\"\\t\", header=None, index=False)\n dev_target_f2_pd.to_csv(path_or_buf=f\"{target_path}twofoldwithunk/fold2/dev.tsv\", \n sep=\"\\t\", header=None, index=False)\n \n list_train_target_f1_pd_uniq = list(train_target_f1_pd.title.unique())\n list_dev_target_f1_pd_uniq = list(dev_target_f1_pd.title.unique())\n\n list_train_target_f2_pd_uniq = list(train_target_f2_pd.title.unique())\n list_dev_target_f2_pd_uniq = list(dev_target_f2_pd.title.unique())\n\n dict_check_target_train_test_paper_context = {}\n for paper in list_train_target_f1_pd_uniq:\n dict_check_target_train_test_paper_context[paper]=train_target_f1_pd[train_target_f1_pd.title==paper].Context.values[0]\n for paper in list_dev_target_f1_pd_uniq:\n dict_check_target_train_test_paper_context[paper]=dev_target_f1_pd[dev_target_f1_pd.title==paper].Context.values[0]\n for paper in list_train_target_f2_pd_uniq:\n dict_check_target_train_test_paper_context[paper]=train_target_f2_pd[train_target_f2_pd.title==paper].Context.values[0]\n for paper in list_dev_target_f2_pd_uniq:\n dict_check_target_train_test_paper_context[paper]=dev_target_f2_pd[dev_target_f2_pd.title==paper].Context.values[0]\n\n print(\"---------------------------------------------\")\n\n len_context_source = get_start_lenght(dict_source_paper_context, \n limit=\"Source\", \n title=\"Source\")\n\n len_context_target = get_start_lenght(dict_check_target_train_test_paper_context, \n limit=\"Target After Update\",\n title=\"Target After Update\")\n\n print(\"---------------------------------------------\")\n\n\n runtime = round(time.time() - start_time, 3)\n 
print(\"runtime: %s minutes \" % (runtime/60))\n print(\"#####################################\")\n print(args)\n print(\"#####################################\")\n print('done.')"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
JunnYu/Paddle-AI-Writer | [
"8d211f9e60aeed323b6330065668f54350514c70"
] | [
"src/model.py"
] | [
"########################################################################################################\n# AI人工智障写作 - https://github.com/BlinkDL/AI-Writer\n########################################################################################################\n\nimport logging\nimport math\n\nimport torch\nimport torch.nn as nn\nfrom torch.nn import functional as F\n\nlogger = logging.getLogger(__name__)\n\n\nclass RWKV_TimeMix(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n assert config.n_attn % config.n_head == 0\n self.layer_id = layer_id\n self.ctx_len = config.ctx_len\n self.n_head = config.n_head\n self.head_size = config.n_attn // config.n_head\n\n self.time_ww = nn.Parameter(\n torch.ones(config.n_head, config.ctx_len, config.ctx_len)\n )\n self.time_gamma = nn.Parameter(torch.ones(config.ctx_len, 1))\n\n self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))\n\n self.key = nn.Linear(config.n_embd, config.n_attn)\n self.value = nn.Linear(config.n_embd, config.n_attn)\n self.receptance = nn.Linear(config.n_embd, config.n_attn)\n\n self.output = nn.Linear(config.n_attn, config.n_embd)\n\n self.key.scale_init = 0\n self.receptance.scale_init = 0\n self.output.scale_init = 0\n\n def forward(self, x):\n B, T, C = x.size()\n\n x = torch.cat([self.time_shift(x[:, :, : C // 2]), x[:, :, C // 2 :]], dim=-1)\n\n k = self.key(x)\n v = self.value(x)\n r = self.receptance(x)\n\n k = torch.clamp(k, max=30, min=-60)\n k = torch.exp(k)\n sum_k = torch.cumsum(k, dim=1)\n\n kv = (k * v).view(B, T, self.n_head, self.head_size)\n\n wkv = (\n (torch.einsum(\"htu,buhc->bthc\", self.time_ww[:, :T, :T], kv))\n .contiguous()\n .view(B, T, -1)\n )\n\n rwkv = torch.sigmoid(r) * wkv / sum_k\n\n rwkv = self.output(rwkv)\n return rwkv * self.time_gamma[:T, :]\n\n\nclass RWKV_ChannelMix(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n self.layer_id = layer_id\n self.time_shift = nn.ZeroPad2d((0, 0, 1, -1))\n\n hidden_sz = 5 * config.n_ffn 
// 2\n self.key = nn.Linear(config.n_embd, hidden_sz)\n self.value = nn.Linear(config.n_embd, hidden_sz)\n self.weight = nn.Linear(hidden_sz, config.n_embd)\n self.receptance = nn.Linear(config.n_embd, config.n_embd)\n\n self.receptance.scale_init = 0\n self.weight.scale_init = 0\n\n def forward(self, x):\n B, T, C = x.size()\n\n x = torch.cat([self.time_shift(x[:, :, : C // 2]), x[:, :, C // 2 :]], dim=-1)\n k = self.key(x)\n v = self.value(x)\n r = self.receptance(x)\n\n wkv = self.weight(F.mish(k) * v)\n\n rwkv = torch.sigmoid(r) * wkv\n\n return rwkv\n\n\nclass GPTConfig:\n def __init__(self, vocab_size, ctx_len, **kwargs):\n self.vocab_size = vocab_size\n self.ctx_len = ctx_len\n for k, v in kwargs.items():\n setattr(self, k, v)\n\n\nclass Block(nn.Module):\n def __init__(self, config, layer_id):\n super().__init__()\n self.config = config\n\n self.ln1 = nn.LayerNorm(config.n_embd)\n self.ln2 = nn.LayerNorm(config.n_embd)\n\n self.attn = RWKV_TimeMix(config, layer_id)\n self.mlp = RWKV_ChannelMix(config, layer_id)\n\n def forward(self, x):\n\n x = x + self.attn(self.ln1(x))\n x = x + self.mlp(self.ln2(x))\n\n return x\n\n\nclass GPT(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.config = config\n\n self.tok_emb = nn.Embedding(config.vocab_size, config.n_embd)\n\n self.blocks = nn.Sequential(*[Block(config, i) for i in range(config.n_layer)])\n\n self.ln_f = nn.LayerNorm(config.n_embd)\n self.time_out = nn.Parameter(torch.ones(1, config.ctx_len, 1))\n self.head = nn.Linear(config.n_embd, config.vocab_size, bias=False)\n\n self.head_q = nn.Linear(config.n_embd, 256)\n self.head_k = nn.Linear(config.n_embd, 256)\n self.register_buffer(\n \"copy_mask\", torch.tril(torch.ones(config.ctx_len, config.ctx_len))\n )\n\n self.ctx_len = config.ctx_len\n\n logger.info(\n \"number of parameters: %e\", sum(p.numel() for p in self.parameters())\n )\n\n def get_ctx_len(self):\n return self.ctx_len\n\n def forward(self, idx, targets=None):\n B, T = 
idx.size()\n assert T <= self.ctx_len, \"Cannot forward, because len(input) > model ctx_len.\"\n\n x = self.tok_emb(idx)\n\n x = self.blocks(x)\n\n x = self.ln_f(x)\n q = self.head_q(x)[:, :T, :]\n k = self.head_k(x)[:, :T, :]\n c = (q @ k.transpose(-2, -1)) * (1.0 / 256)\n c = c.masked_fill(self.copy_mask[:T, :T] == 0, 0)\n c = c @ F.one_hot(idx, num_classes=self.config.vocab_size).float()\n x = x * self.time_out[:, :T, :]\n x = self.head(x) + c\n\n loss = None\n if targets is not None:\n loss = F.cross_entropy(x.view(-1, x.size(-1)), targets.view(-1))\n\n return x, loss\n"
] | [
[
"torch.nn.ZeroPad2d",
"torch.sigmoid",
"torch.ones",
"torch.nn.functional.mish",
"torch.einsum",
"torch.nn.Embedding",
"torch.nn.LayerNorm",
"torch.exp",
"torch.nn.Linear",
"torch.nn.functional.one_hot",
"torch.clamp",
"torch.cumsum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gpspelle/huawei-AI | [
"967842796e0809fa385d218e2e901c3df05df83e"
] | [
"submissions/xgburro/regressor.py"
] | [
"\nimport numpy as np\nfrom sklearn import multioutput\nimport xgboost as xgb\n\n\nclass Regressor():\n def _init_(self):\n super()._init_()\n self.model = None\n\n def fit(self, X, y):\n # Get data and create train data loaders\n X_32 = np.array([_[1] for _ in X])\n self.model = multioutput.MultiOutputRegressor(xgb.XGBRegressor()).fit(X_32, y)\n\n def predict(self, X):\n dtest = np.array([_ for _ in X[:,1]])\n preds = self.model.predict(dtest)\n preds = preds * (preds > 0)\n return preds\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
learningmatter-mit/Atomistic-Adversarial-Attacks | [
"9efb2dc85c6aad03db9f43e30d0fcb478c52e590"
] | [
"robust/actlearn/pipeline.py"
] | [
"import torch as ch\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader\n\nfrom nff.train import hooks as nff_hooks\n\nfrom robust.models import NnEnsemble, NnRegressor\nfrom robust import metrics, hooks, train, loss, attacks, data\n\n\nDEFAULT_NAME = \"train\"\nBATCH_SIZE = 50\nMAX_EPOCHS = 300\n\n\nclass ForwardPipeline:\n def __init__(\n self,\n dset,\n model_params,\n loss_params,\n optim_params,\n train_params,\n attack_params,\n name=DEFAULT_NAME,\n dset_train=None,\n dset_train_weight=4,\n ):\n self.name = name\n self.dset = dset\n\n self.dset_train_weight = dset_train_weight\n if dset_train is None:\n self.dset_train = data.PotentialDataset.from_empty_dataset()\n else:\n self.dset_train = dset_train\n\n self.model_params = model_params\n self.loss_params = loss_params\n self.optim_params = optim_params\n self.train_params = train_params\n self.attack_params = attack_params\n\n self.train_loader, self.val_loader, self.test_loader = self.get_loaders(\n train_params[\"batch_size\"]\n )\n self.trainer = self.get_trainer()\n\n def copy(self, new_name=None):\n if new_name is None:\n new_name = self.name\n\n return self.__class__(\n self.dset,\n self.model_params.copy(),\n self.loss_params.copy(),\n self.optim_params.copy(),\n self.train_params.copy(),\n self.attack_params.copy(),\n new_name,\n )\n\n def augment_train_set(self, train):\n newtrain = train\n for i in range(self.dset_train_weight):\n newtrain += self.dset_train.copy()\n return newtrain\n\n def get_loaders(self, batch_size):\n train, val, test = self.dset.split_train_validation_test()\n\n train_loader = DataLoader(\n self.augment_train_set(train),\n batch_size=batch_size,\n shuffle=True,\n )\n val_loader = DataLoader(val, batch_size=batch_size, shuffle=True)\n test_loader = DataLoader(test, batch_size=batch_size, shuffle=True)\n\n return train_loader, val_loader, test_loader\n\n def create_model(self):\n num_nets = self.model_params[\"num_networks\"]\n model = 
NnEnsemble([NnRegressor(**self.model_params) for _ in range(num_nets)])\n\n trainable_params = filter(lambda p: p.requires_grad, model.parameters())\n optimizer = Adam(trainable_params, **self.optim_params)\n\n return model, optimizer\n\n def get_loss_fn(self):\n return loss.MeanSquareLoss(**self.loss_params)\n\n def get_adv_loss(self):\n loss_type = self.attack_params.get('uncertainty', 'forces')\n\n if loss_type == 'energy':\n return loss.AdvLossEnergyUncertainty(self.dset + self.dset_train, **self.loss_params)\n\n return loss.AdvLoss(self.dset + self.dset_train, **self.loss_params)\n\n def get_metrics(self):\n return [metrics.MAE(1, name=\"MAE Energy\"), metrics.MAE(2, name=\"MAE Forces\")]\n\n def get_hooks(self, optimizer):\n return [\n nff_hooks.MaxEpochHook(self.train_params.get(\"max_epochs\", MAX_EPOCHS)),\n nff_hooks.PrintingHook(\n self.name,\n metrics=self.get_metrics(),\n separator=\" | \",\n time_strf=\"%M:%S\",\n every_n_epochs=25,\n ),\n nff_hooks.ReduceLROnPlateauHook(\n optimizer=optimizer,\n patience=50,\n factor=0.5,\n min_lr=1e-7,\n window_length=1,\n stop_after_min=True,\n ),\n hooks.RequiresGradHook(),\n ]\n\n def get_trainer(self):\n model, optimizer = self.create_model()\n\n T = train.Trainer(\n model_path=self.name,\n model=model,\n loss_fn=self.get_loss_fn(),\n optimizer=optimizer,\n train_loader=self.train_loader,\n validation_loader=self.val_loader,\n checkpoint_interval=50,\n hooks=self.get_hooks(optimizer),\n )\n\n return T\n\n def train(self, device, n_epochs):\n self.trainer.train(device, n_epochs)\n return self.trainer.get_best_model()\n\n def evaluate(self, loader, device):\n return self.trainer.evaluate(loader, device)\n\n def get_attacker(self):\n attack = attacks.SumAttack(\n **self.attack_params,\n )\n\n if self.attack_params.get('random_attack', False):\n attacker_cls = attacks.RandomAttacker\n else:\n attacker_cls = attacks.AdversarialAttacker\n\n return attacker_cls(\n self.trainer.get_best_model(),\n self.get_adv_loss(),\n 
self.train_loader,\n attack,\n **self.attack_params,\n )\n\n def attack(self, device, n_epochs):\n self.attacker = self.get_attacker()\n return self.attacker.train(device, n_epochs)\n"
] | [
[
"torch.optim.Adam",
"torch.utils.data.DataLoader"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhouruqin/MaskNet | [
"13eba26316b3cd5f6f3aee02ebd2458d90de023e"
] | [
"learning3d/models/flownet3d.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom time import time\nimport numpy as np\nfrom .. utils import pointnet2_utils as pointutils\n\ndef timeit(tag, t):\n print(\"{}: {}s\".format(tag, time() - t))\n return time()\n\ndef pc_normalize(pc):\n l = pc.shape[0]\n centroid = np.mean(pc, axis=0)\n pc = pc - centroid\n m = np.max(np.sqrt(np.sum(pc**2, axis=1)))\n pc = pc / m\n return pc\n\ndef square_distance(src, dst):\n \"\"\"\n Calculate Euclid distance between each two points.\n src^T * dst = xn * xm + yn * ym + zn * zm;\n sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn;\n sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm;\n dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2\n = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst\n Input:\n src: source points, [B, N, C]\n dst: target points, [B, M, C]\n Output:\n dist: per-point square distance, [B, N, M]\n \"\"\"\n B, N, _ = src.shape\n _, M, _ = dst.shape\n dist = -2 * torch.matmul(src, dst.permute(0, 2, 1))\n dist += torch.sum(src ** 2, -1).view(B, N, 1)\n dist += torch.sum(dst ** 2, -1).view(B, 1, M)\n return dist\n\n\ndef index_points(points, idx):\n \"\"\"\n Input:\n points: input points data, [B, N, C]\n idx: sample index data, [B, S]\n Return:\n new_points:, indexed points data, [B, S, C]\n \"\"\"\n device = points.device\n B = points.shape[0]\n view_shape = list(idx.shape)\n view_shape[1:] = [1] * (len(view_shape) - 1)\n repeat_shape = list(idx.shape)\n repeat_shape[0] = 1\n batch_indices = torch.arange(B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape)\n new_points = points[batch_indices, idx, :]\n return new_points\n\n\ndef farthest_point_sample(xyz, npoint):\n \"\"\"\n Input:\n xyz: pointcloud data, [B, N, C]\n npoint: number of samples\n Return:\n centroids: sampled pointcloud index, [B, npoint]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n centroids = torch.zeros(B, npoint, dtype=torch.long).to(device)\n distance = torch.ones(B, N).to(device) * 1e10\n farthest 
= torch.randint(0, N, (B,), dtype=torch.long).to(device)\n batch_indices = torch.arange(B, dtype=torch.long).to(device)\n for i in range(npoint):\n centroids[:, i] = farthest\n centroid = xyz[batch_indices, farthest, :].view(B, 1, 3)\n dist = torch.sum((xyz - centroid) ** 2, -1)\n mask = dist < distance\n distance[mask] = dist[mask]\n farthest = torch.max(distance, -1)[1]\n return centroids\n\ndef knn_point(k, pos1, pos2):\n '''\n Input:\n k: int32, number of k in k-nn search\n pos1: (batch_size, ndataset, c) float32 array, input points\n pos2: (batch_size, npoint, c) float32 array, query points\n Output:\n val: (batch_size, npoint, k) float32 array, L2 distances\n idx: (batch_size, npoint, k) int32 array, indices to input points\n '''\n B, N, C = pos1.shape\n M = pos2.shape[1]\n pos1 = pos1.view(B,1,N,-1).repeat(1,M,1,1)\n pos2 = pos2.view(B,M,1,-1).repeat(1,1,N,1)\n dist = torch.sum(-(pos1-pos2)**2,-1)\n val,idx = dist.topk(k=k,dim = -1)\n return torch.sqrt(-val), idx\n\n\ndef query_ball_point(radius, nsample, xyz, new_xyz):\n \"\"\"\n Input:\n radius: local region radius\n nsample: max sample number in local region\n xyz: all points, [B, N, C]\n new_xyz: query points, [B, S, C]\n Return:\n group_idx: grouped points index, [B, S, nsample]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n _, S, _ = new_xyz.shape\n group_idx = torch.arange(N, dtype=torch.long).to(device).view(1, 1, N).repeat([B, S, 1])\n sqrdists = square_distance(new_xyz, xyz)\n group_idx[sqrdists > radius ** 2] = N\n mask = group_idx != N\n cnt = mask.sum(dim=-1)\n group_idx = group_idx.sort(dim=-1)[0][:, :, :nsample]\n group_first = group_idx[:, :, 0].view(B, S, 1).repeat([1, 1, nsample])\n mask = group_idx == N\n group_idx[mask] = group_first[mask]\n return group_idx, cnt\n\n\ndef sample_and_group(npoint, radius, nsample, xyz, points, returnfps=False):\n \"\"\"\n Input:\n npoint:\n radius:\n nsample:\n xyz: input points position data, [B, N, C]\n points: input points data, [B, N, D]\n 
Return:\n new_xyz: sampled points position data, [B, 1, C]\n new_points: sampled points data, [B, 1, N, C+D]\n \"\"\"\n B, N, C = xyz.shape\n S = npoint\n fps_idx = farthest_point_sample(xyz, npoint) # [B, npoint, C]\n new_xyz = index_points(xyz, fps_idx)\n idx, _ = query_ball_point(radius, nsample, xyz, new_xyz)\n grouped_xyz = index_points(xyz, idx) # [B, npoint, nsample, C]\n grouped_xyz_norm = grouped_xyz - new_xyz.view(B, S, 1, C)\n if points is not None:\n grouped_points = index_points(points, idx)\n new_points = torch.cat([grouped_xyz_norm, grouped_points], dim=-1) # [B, npoint, nsample, C+D]\n else:\n new_points = grouped_xyz_norm\n if returnfps:\n return new_xyz, new_points, grouped_xyz, fps_idx\n else:\n return new_xyz, new_points\n\n\ndef sample_and_group_all(xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, N, C]\n points: input points data, [B, N, D]\n Return:\n new_xyz: sampled points position data, [B, 1, C]\n new_points: sampled points data, [B, 1, N, C+D]\n \"\"\"\n device = xyz.device\n B, N, C = xyz.shape\n new_xyz = torch.zeros(B, 1, C).to(device)\n grouped_xyz = xyz.view(B, 1, N, C)\n if points is not None:\n new_points = torch.cat([grouped_xyz, points.view(B, 1, N, -1)], dim=-1)\n else:\n new_points = grouped_xyz\n return new_xyz, new_points\n\nclass PointNetSetAbstraction(nn.Module):\n def __init__(self, npoint, radius, nsample, in_channel, mlp, group_all):\n super(PointNetSetAbstraction, self).__init__()\n self.npoint = npoint\n self.radius = radius\n self.nsample = nsample\n self.group_all = group_all\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel+3 # TODO:\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias = False))\n self.mlp_bns.append(nn.BatchNorm2d(out_channel))\n last_channel = out_channel\n \n if group_all:\n self.queryandgroup = pointutils.GroupAll()\n else:\n self.queryandgroup = pointutils.QueryAndGroup(radius, 
nsample)\n\n def forward(self, xyz, points):\n \"\"\"\n Input:\n xyz: input points position data, [B, C, N]\n points: input points data, [B, D, N]\n Return:\n new_xyz: sampled points position data, [B, S, C]\n new_points_concat: sample points feature data, [B, S, D']\n \"\"\"\n device = xyz.device\n B, C, N = xyz.shape\n xyz_t = xyz.permute(0, 2, 1).contiguous()\n # if points is not None:\n # points = points.permute(0, 2, 1).contiguous()\n\n # 选取邻域点\n if self.group_all == False:\n fps_idx = pointutils.furthest_point_sample(xyz_t, self.npoint) # [B, N]\n new_xyz = pointutils.gather_operation(xyz, fps_idx) # [B, C, N]\n else:\n new_xyz = xyz\n new_points = self.queryandgroup(xyz_t, new_xyz.transpose(2, 1).contiguous(), points) # [B, 3+C, N, S]\n \n # new_xyz: sampled points position data, [B, C, npoint]\n # new_points: sampled points data, [B, C+D, npoint, nsample]\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n new_points = F.relu(bn(conv(new_points)))\n\n new_points = torch.max(new_points, -1)[0]\n return new_xyz, new_points\n\nclass FlowEmbedding(nn.Module):\n def __init__(self, radius, nsample, in_channel, mlp, pooling='max', corr_func='concat', knn = True):\n super(FlowEmbedding, self).__init__()\n self.radius = radius\n self.nsample = nsample\n self.knn = knn\n self.pooling = pooling\n self.corr_func = corr_func\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n if corr_func is 'concat':\n last_channel = in_channel*2+3\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv2d(last_channel, out_channel, 1, bias=False))\n self.mlp_bns.append(nn.BatchNorm2d(out_channel))\n last_channel = out_channel\n\n def forward(self, pos1, pos2, feature1, feature2):\n \"\"\"\n Input:\n xyz1: (batch_size, 3, npoint)\n xyz2: (batch_size, 3, npoint)\n feat1: (batch_size, channel, npoint)\n feat2: (batch_size, channel, npoint)\n Output:\n xyz1: (batch_size, 3, npoint)\n feat1_new: (batch_size, mlp[-1], npoint)\n \"\"\"\n pos1_t = 
pos1.permute(0, 2, 1).contiguous()\n pos2_t = pos2.permute(0, 2, 1).contiguous()\n B, N, C = pos1_t.shape\n if self.knn:\n _, idx = pointutils.knn(self.nsample, pos1_t, pos2_t)\n else:\n # If the ball neighborhood points are less than nsample,\n # than use the knn neighborhood points\n idx, cnt = query_ball_point(self.radius, self.nsample, pos2_t, pos1_t)\n # 利用knn取最近的那些点\n _, idx_knn = pointutils.knn(self.nsample, pos1_t, pos2_t)\n cnt = cnt.view(B, -1, 1).repeat(1, 1, self.nsample)\n idx = idx_knn[cnt > (self.nsample-1)]\n \n pos2_grouped = pointutils.grouping_operation(pos2, idx) # [B, 3, N, S]\n pos_diff = pos2_grouped - pos1.view(B, -1, N, 1) # [B, 3, N, S]\n \n feat2_grouped = pointutils.grouping_operation(feature2, idx) # [B, C, N, S]\n if self.corr_func=='concat':\n feat_diff = torch.cat([feat2_grouped, feature1.view(B, -1, N, 1).repeat(1, 1, 1, self.nsample)], dim = 1)\n \n feat1_new = torch.cat([pos_diff, feat_diff], dim = 1) # [B, 2*C+3,N,S]\n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n feat1_new = F.relu(bn(conv(feat1_new)))\n\n feat1_new = torch.max(feat1_new, -1)[0] # [B, mlp[-1], npoint]\n return pos1, feat1_new\n\nclass PointNetSetUpConv(nn.Module):\n def __init__(self, nsample, radius, f1_channel, f2_channel, mlp, mlp2, knn = True):\n super(PointNetSetUpConv, self).__init__()\n self.nsample = nsample\n self.radius = radius\n self.knn = knn\n self.mlp1_convs = nn.ModuleList()\n self.mlp2_convs = nn.ModuleList()\n last_channel = f2_channel+3\n for out_channel in mlp:\n self.mlp1_convs.append(nn.Sequential(nn.Conv2d(last_channel, out_channel, 1, bias=False),\n nn.BatchNorm2d(out_channel),\n nn.ReLU(inplace=False)))\n last_channel = out_channel\n if len(mlp) is not 0:\n last_channel = mlp[-1] + f1_channel\n else:\n last_channel = last_channel + f1_channel\n for out_channel in mlp2:\n self.mlp2_convs.append(nn.Sequential(nn.Conv1d(last_channel, out_channel, 1, bias=False),\n nn.BatchNorm1d(out_channel),\n 
nn.ReLU(inplace=False)))\n last_channel = out_channel\n\n def forward(self, pos1, pos2, feature1, feature2):\n \"\"\"\n Feature propagation from xyz2 (less points) to xyz1 (more points)\n Inputs:\n xyz1: (batch_size, 3, npoint1)\n xyz2: (batch_size, 3, npoint2)\n feat1: (batch_size, channel1, npoint1) features for xyz1 points (earlier layers, more points)\n feat2: (batch_size, channel1, npoint2) features for xyz2 points\n Output:\n feat1_new: (batch_size, npoint2, mlp[-1] or mlp2[-1] or channel1+3)\n TODO: Add support for skip links. Study how delta(XYZ) plays a role in feature updating.\n \"\"\"\n pos1_t = pos1.permute(0, 2, 1).contiguous()\n pos2_t = pos2.permute(0, 2, 1).contiguous()\n B,C,N = pos1.shape\n if self.knn:\n _, idx = pointutils.knn(self.nsample, pos1_t, pos2_t)\n else:\n idx, _ = query_ball_point(self.radius, self.nsample, pos2_t, pos1_t)\n \n pos2_grouped = pointutils.grouping_operation(pos2, idx)\n pos_diff = pos2_grouped - pos1.view(B, -1, N, 1) # [B,3,N1,S]\n\n feat2_grouped = pointutils.grouping_operation(feature2, idx)\n feat_new = torch.cat([feat2_grouped, pos_diff], dim = 1) # [B,C1+3,N1,S]\n for conv in self.mlp1_convs:\n feat_new = conv(feat_new)\n # max pooling\n feat_new = feat_new.max(-1)[0] # [B,mlp1[-1],N1]\n # concatenate feature in early layer\n if feature1 is not None:\n feat_new = torch.cat([feat_new, feature1], dim=1)\n # feat_new = feat_new.view(B,-1,N,1)\n for conv in self.mlp2_convs:\n feat_new = conv(feat_new)\n \n return feat_new\n\nclass PointNetFeaturePropogation(nn.Module):\n def __init__(self, in_channel, mlp):\n super(PointNetFeaturePropogation, self).__init__()\n self.mlp_convs = nn.ModuleList()\n self.mlp_bns = nn.ModuleList()\n last_channel = in_channel\n for out_channel in mlp:\n self.mlp_convs.append(nn.Conv1d(last_channel, out_channel, 1))\n self.mlp_bns.append(nn.BatchNorm1d(out_channel))\n last_channel = out_channel\n\n def forward(self, pos1, pos2, feature1, feature2):\n \"\"\"\n Input:\n xyz1: input points 
position data, [B, C, N]\n xyz2: sampled input points position data, [B, C, S]\n points1: input points data, [B, D, N]\n points2: input points data, [B, D, S]\n Return:\n new_points: upsampled points data, [B, D', N]\n \"\"\"\n pos1_t = pos1.permute(0, 2, 1).contiguous()\n pos2_t = pos2.permute(0, 2, 1).contiguous()\n B, C, N = pos1.shape\n \n # dists = square_distance(pos1, pos2)\n # dists, idx = dists.sort(dim=-1)\n # dists, idx = dists[:, :, :3], idx[:, :, :3] # [B, N, 3]\n dists,idx = pointutils.three_nn(pos1_t,pos2_t)\n dists[dists < 1e-10] = 1e-10\n weight = 1.0 / dists\n weight = weight / torch.sum(weight, -1,keepdim = True) # [B,N,3]\n interpolated_feat = torch.sum(pointutils.grouping_operation(feature2, idx) * weight.view(B, 1, N, 3), dim = -1) # [B,C,N,3]\n\n if feature1 is not None:\n feat_new = torch.cat([interpolated_feat, feature1], 1)\n else:\n feat_new = interpolated_feat\n \n for i, conv in enumerate(self.mlp_convs):\n bn = self.mlp_bns[i]\n feat_new = F.relu(bn(conv(feat_new)))\n return feat_new\n\n\nclass FlowNet3D(nn.Module):\n def __init__(self):\n super(FlowNet3D, self).__init__()\n\n self.sa1 = PointNetSetAbstraction(npoint=1024, radius=0.5, nsample=16, in_channel=3, mlp=[32,32,64], group_all=False)\n self.sa2 = PointNetSetAbstraction(npoint=256, radius=1.0, nsample=16, in_channel=64, mlp=[64, 64, 128], group_all=False)\n self.sa3 = PointNetSetAbstraction(npoint=64, radius=2.0, nsample=8, in_channel=128, mlp=[128, 128, 256], group_all=False)\n self.sa4 = PointNetSetAbstraction(npoint=16, radius=4.0, nsample=8, in_channel=256, mlp=[256, 256, 512], group_all=False)\n \n self.fe_layer = FlowEmbedding(radius=10.0, nsample=64, in_channel = 128, mlp=[128, 128, 128], pooling='max', corr_func='concat')\n \n self.su1 = PointNetSetUpConv(nsample=8, radius=2.4, f1_channel = 256, f2_channel = 512, mlp=[], mlp2=[256, 256])\n self.su2 = PointNetSetUpConv(nsample=8, radius=1.2, f1_channel = 128+128, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])\n 
self.su3 = PointNetSetUpConv(nsample=8, radius=0.6, f1_channel = 64, f2_channel = 256, mlp=[128, 128, 256], mlp2=[256])\n self.fp = PointNetFeaturePropogation(in_channel = 256+3, mlp = [256, 256])\n \n self.conv1 = nn.Conv1d(256, 128, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm1d(128)\n self.conv2=nn.Conv1d(128, 3, kernel_size=1, bias=True)\n \n def forward(self, pc1, pc2, feature1, feature2):\n l1_pc1, l1_feature1 = self.sa1(pc1, feature1)\n l2_pc1, l2_feature1 = self.sa2(l1_pc1, l1_feature1)\n \n l1_pc2, l1_feature2 = self.sa1(pc2, feature2)\n l2_pc2, l2_feature2 = self.sa2(l1_pc2, l1_feature2)\n \n _, l2_feature1_new = self.fe_layer(l2_pc1, l2_pc2, l2_feature1, l2_feature2)\n\n l3_pc1, l3_feature1 = self.sa3(l2_pc1, l2_feature1_new)\n l4_pc1, l4_feature1 = self.sa4(l3_pc1, l3_feature1)\n \n l3_fnew1 = self.su1(l3_pc1, l4_pc1, l3_feature1, l4_feature1)\n l2_fnew1 = self.su2(l2_pc1, l3_pc1, torch.cat([l2_feature1, l2_feature1_new], dim=1), l3_fnew1)\n l1_fnew1 = self.su3(l1_pc1, l2_pc1, l1_feature1, l2_fnew1)\n l0_fnew1 = self.fp(pc1, l1_pc1, feature1, l1_fnew1)\n \n x = F.relu(self.bn1(self.conv1(l0_fnew1)))\n sf = self.conv2(x)\n return sf\n \nif __name__ == '__main__':\n import os\n import torch\n os.environ[\"CUDA_VISIBLE_DEVICES\"] = '0'\n input = torch.randn((8,3,2048))\n label = torch.randn(8,16)\n model = FlowNet3D()\n output = model(input,input)\n print(output.size())"
] | [
[
"torch.nn.BatchNorm1d",
"torch.randint",
"torch.max",
"torch.ones",
"torch.cat",
"torch.sqrt",
"torch.randn",
"torch.nn.ModuleList",
"torch.zeros",
"torch.sum",
"torch.arange",
"torch.nn.Conv2d",
"numpy.mean",
"torch.nn.BatchNorm2d",
"torch.nn.Conv1d",
"torch.nn.ReLU",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CQCL/tket | [
"3f3d7c24d9a6c9cfd85966c13dcccc8a13f92adb"
] | [
"pytket/tests/utils_test.py"
] | [
"# Copyright 2019-2022 Cambridge Quantum Computing\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import Counter\nimport numpy as np\nfrom pytket.backends.backend import Backend\nfrom hypothesis import strategies, given, settings, HealthCheck\nfrom hypothesis.strategies._internal import SearchStrategy\n\nfrom pytket.circuit import Qubit, Circuit, OpType # type: ignore\nfrom pytket.pauli import Pauli, QubitPauliString # type: ignore\nfrom pytket.partition import PauliPartitionStrat, GraphColourMethod # type: ignore\nfrom pytket.transform import Transform # type: ignore\nfrom pytket.utils.expectations import (\n expectation_from_shots,\n expectation_from_counts,\n get_operator_expectation_value,\n)\nfrom pytket.utils.measurements import append_pauli_measurement, _all_pauli_measurements\nfrom pytket.utils.results import (\n counts_from_shot_table,\n get_n_qb_from_statevector,\n probs_from_counts,\n probs_from_state,\n permute_basis_indexing,\n compare_statevectors,\n)\nfrom pytket.utils.outcomearray import OutcomeArray\nfrom pytket.utils import QubitPauliOperator, Graph\nfrom pytket.utils.symbolic import (\n circuit_apply_symbolic_statevector,\n circuit_to_symbolic_unitary,\n)\nimport pytest # type: ignore\nimport types\nfrom sympy import symbols # type: ignore\nfrom typing import Any, Callable, Tuple, Dict, List\nfrom simulator import TketSimShotBackend, TketSimBackend # type: ignore\n\n\ndef test_append_measurements() -> None:\n c = Circuit(4)\n 
qps = QubitPauliString(\n [Qubit(i) for i in range(4)], [Pauli.Y, Pauli.I, Pauli.X, Pauli.Z]\n )\n append_pauli_measurement(qps, c)\n coms = c.get_commands()\n assert len(coms) == 5\n assert str(coms[0]) == \"Measure q[3] --> c[2];\"\n assert str(coms[1]) == \"Rx(0.5) q[0];\"\n assert str(coms[2]) == \"H q[2];\"\n assert str(coms[3]) == \"Measure q[0] --> c[0];\"\n assert str(coms[4]) == \"Measure q[2] --> c[1];\"\n\n\ndef test_append_measurements_err0() -> None:\n c = Circuit(2)\n qps = QubitPauliString(Qubit(2), Pauli.X)\n with pytest.raises(RuntimeError) as ex:\n append_pauli_measurement(qps, c)\n assert \"Circuit does not contain unit with id: q[2]\" in str(ex.value)\n\n\ndef test_all_paulis() -> None:\n c = Circuit(2)\n qps1 = QubitPauliString() # This will be ignored, as it imposes no measurements\n qps2 = QubitPauliString(Qubit(0), Pauli.Z)\n qps3 = QubitPauliString(Qubit(1), Pauli.Z)\n qps4 = QubitPauliString(Qubit(0), Pauli.Z)\n qps4[Qubit(1)] = Pauli.Z\n op = QubitPauliOperator({qps1: 1, qps2: 1, qps3: 2, qps4: -1.0j})\n circs = _all_pauli_measurements(op, c)\n assert isinstance(circs, types.GeneratorType)\n assert len(list(circs)) == 3\n\n\ndef test_shots_to_counts() -> None:\n shot_table = np.asarray([[0, 0], [0, 1], [0, 0]])\n counts = counts_from_shot_table(shot_table)\n assert len(counts) == 2\n assert counts[(0, 0)] == 2\n assert counts[(0, 1)] == 1\n\n\ndef test_counts_to_probs() -> None:\n counts: Dict[Tuple[int, ...], int] = {(0, 0): 4, (0, 1): 1, (1, 1): 3}\n probs = probs_from_counts(counts)\n assert len(probs) == 3\n assert probs[(0, 0)] == 0.5\n assert probs[(0, 1)] == 0.125\n assert probs[(1, 1)] == 0.375\n\n\ndef test_state_to_probs() -> None:\n state = np.asarray([0.5 - 0.5j, 0.5 + 0.5j, 1e-5, 0.999e-5])\n probs = probs_from_state(state)\n assert len(probs) == 2\n assert np.isclose(probs[(0, 0)], 0.5)\n assert np.isclose(probs[(0, 1)], 0.5)\n\n\ndef test_n_qb_from_statevector() -> None:\n state = np.asarray([0.5, 0.5, 0.5, 0.5])\n assert 
get_n_qb_from_statevector(state) == 2\n state = np.asarray([1.0, 0.0])\n assert get_n_qb_from_statevector(state) == 1\n state = np.asarray([1.0])\n assert get_n_qb_from_statevector(state) == 0\n state = np.zeros(128)\n assert get_n_qb_from_statevector(state) == 7\n\n\ndef test_n_qb_from_statevector_err() -> None:\n state = np.asarray([0.5, 0.5, 0.5])\n with pytest.raises(ValueError) as ex:\n get_n_qb_from_statevector(state)\n assert \"is not a power of 2\" in str(ex.value)\n\n\ndef test_permute_state() -> None:\n state = np.asarray([0, 0.8, 0.6, 0, 0, 0, 0, 0])\n permuted = permute_basis_indexing(state, (1, 2, 0))\n assert (permuted == np.asarray([0, 0, 0.8, 0, 0.6, 0, 0, 0])).all()\n\n\ndef test_permute_state_err1() -> None:\n state = np.asarray([0, 0.8, 0.6, 0, 0, 0, 0, 0])\n with pytest.raises(ValueError) as ex:\n permute_basis_indexing(state, (0, 1))\n assert \"Invalid permutation: length does not match number of qubits\" in str(\n ex.value\n )\n\n\ndef test_permute_state_err2() -> None:\n state = np.asarray([0, 0.8, 0.6, 0, 0, 0, 0, 0])\n with pytest.raises(ValueError) as ex:\n permute_basis_indexing(state, (0, 1, 3))\n assert \"Permutation is not a valid complete permutation.\" in str(ex.value)\n\n\ndef test_permute_state_err3() -> None:\n state = np.asarray([0, 0.8, 0.6, 0, 0, 0, 0, 0])\n with pytest.raises(ValueError) as ex:\n permute_basis_indexing(state, (0, 1, 0))\n assert \"Permutation is not a valid complete permutation.\" in str(ex.value)\n\n\ndef test_permute_basis_indexing() -> None:\n dimensions = 3\n bases = 1 << dimensions\n matrix = np.arange(bases**2).reshape((bases, bases))\n new_matrix = permute_basis_indexing(matrix, (1, 2, 0))\n assert np.array_equal(new_matrix, matrix[[0, 4, 1, 5, 2, 6, 3, 7], :])\n\n\ndef test_shot_expectation() -> None:\n shot_table = np.asarray([[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 1, 1]])\n assert expectation_from_shots(shot_table) == -1.0\n shot_table = np.asarray([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])\n assert 
expectation_from_shots(shot_table) == 1.0\n shot_table = np.asarray([[0, 0, 0], [0, 0, 1], [1, 1, 0], [1, 1, 1]])\n assert expectation_from_shots(shot_table) == 0.0\n\n\ndef test_count_expectation() -> None:\n counts: Dict[Tuple[int, ...], int] = {\n (0, 0, 1): 4,\n (0, 1, 0): 7,\n (1, 0, 0): 1,\n (1, 1, 1): 8,\n }\n assert expectation_from_counts(counts) == -1.0\n counts = {(0, 0, 0): 4, (0, 1, 1): 7, (1, 0, 1): 1, (1, 1, 0): 8}\n assert expectation_from_counts(counts) == 1.0\n counts = {(0, 0, 0): 4, (0, 0, 1): 7, (1, 1, 0): 1, (1, 1, 1): 8}\n assert expectation_from_counts(counts) == -0.5\n\n\ndef test_outcomearray() -> None:\n in_listA = [1, 0, 1, 1] * 3\n in_listB = [1, 1, 0, 1] * 3\n\n list2int = lambda lis, order=True: int(\n \"\".join(map(str, lis[:: (-1) ** (not order)])), 2\n )\n assert OutcomeArray.from_readouts([in_listA]).to_readouts().tolist() == [in_listA]\n assert all(\n list(tup) == in_listA\n for tup in OutcomeArray.from_readouts([in_listA] * 4).to_readouts()\n )\n\n outcomeA = OutcomeArray.from_readouts([in_listA])\n assert outcomeA.n_outcomes == 1\n assert outcomeA.to_intlist() == [list2int(in_listA)] # type: ignore\n\n outcomeB = OutcomeArray.from_readouts([in_listB])\n outcome2D = OutcomeArray(np.array([outcomeA[0], outcomeB[0]]), outcomeA.width)\n assert outcome2D.n_outcomes == 2\n assert outcome2D.to_intlist() == [list2int(in_listA), list2int(in_listB)] # type: ignore\n assert outcome2D.to_intlist(False) == [\n list2int(in_listA, False), # type: ignore\n list2int(in_listB, False), # type: ignore\n ]\n\n for big in (True, False):\n intlist = [12, 5]\n readout_array = np.array([[1, 1, 0, 0], [0, 1, 0, 1]])\n if not big:\n readout_array = np.fliplr(readout_array) # type: ignore\n\n outcome_from_ints = OutcomeArray.from_ints(intlist, 4, big_endian=big)\n assert np.array_equal(outcome_from_ints.to_readouts(), readout_array)\n assert outcome_from_ints.to_intlist(big) == intlist\n assert outcome_from_ints.to_intlist(not big) != intlist\n\n 
outcomeRepeats = OutcomeArray.from_readouts([in_listA, in_listA, in_listB])\n\n counts = outcomeRepeats.counts()\n assert len(counts) == 2\n assert counts[outcomeA] == 2\n assert counts[outcomeB] == 1\n\n counts1D = outcomeA.counts()\n assert len(counts1D) == 1\n assert counts1D[outcomeA] == 1\n\n # 0 width outcomearrays\n readouts: List[List[int]] = [[] for _ in range(10)]\n empty_array = OutcomeArray.from_readouts(readouts)\n assert np.array_equal(empty_array.to_readouts(), readouts)\n assert empty_array.counts() == Counter(\n {OutcomeArray(np.zeros((1, 0), dtype=np.uint8), 0): 10}\n )\n\n\ndef test_small_pauli_partition_expectation() -> None:\n c = Circuit(2)\n c.X(1)\n qps1 = QubitPauliString(Qubit(0), Pauli.Z)\n qps2 = QubitPauliString(Qubit(1), Pauli.Z)\n op = QubitPauliOperator({qps1: 0.5, qps2: 1.0})\n backend = TketSimShotBackend()\n n_shots = 10000\n strats = [\n None,\n PauliPartitionStrat.NonConflictingSets,\n PauliPartitionStrat.CommutingSets,\n ]\n for strat in strats:\n energy = complex(\n get_operator_expectation_value(c, op, backend, n_shots, strat, seed=4) # type: ignore\n )\n assert np.isclose(energy, -0.5, atol=0.01)\n\n\ndef test_medium_pauli_partition_expectation() -> None:\n c = Circuit(4)\n c.H(1)\n c.H(3)\n c.Z(3)\n qps1 = QubitPauliString({Qubit(0): Pauli.Z, Qubit(1): Pauli.Z, Qubit(2): Pauli.Z})\n qps2 = QubitPauliString(Qubit(0), Pauli.Y)\n qps3 = QubitPauliString({Qubit(1): Pauli.X, Qubit(3): Pauli.X})\n\n op = QubitPauliOperator({qps1: 0.5, qps2: 0.8, qps3: -10.2})\n backends = [TketSimShotBackend(), TketSimBackend()]\n n_shots_list = [10000, None]\n strats = [\n None,\n PauliPartitionStrat.NonConflictingSets,\n PauliPartitionStrat.CommutingSets,\n ]\n for backend, n_shots in zip(backends, n_shots_list):\n for strat in strats:\n energy = get_operator_expectation_value(\n c, op, backend, n_shots, strat, GraphColourMethod.LargestFirst, seed=456\n )\n assert np.isclose(float(np.real(energy)), 10.2, atol=0.01) # type: ignore\n\n\ndef 
test_large_pauli_partition_expectation() -> None:\n c = Circuit(5)\n c.CX(0, 2)\n c.H(4)\n c.V(2)\n qps1 = QubitPauliString({Qubit(0): Pauli.Z, Qubit(1): Pauli.Z})\n qps2 = QubitPauliString({Qubit(0): Pauli.X, Qubit(2): Pauli.X})\n qps3 = QubitPauliString({Qubit(0): Pauli.Y, Qubit(2): Pauli.Y})\n qps4 = QubitPauliString(\n {Qubit(1): Pauli.Z, Qubit(2): Pauli.Z, Qubit(3): Pauli.X, Qubit(4): Pauli.Z}\n )\n qps5 = QubitPauliString({Qubit(3): Pauli.Z, Qubit(4): Pauli.X})\n qps6 = QubitPauliString()\n op = QubitPauliOperator(\n {qps1: 0.3, qps2: -0.7j, qps3: 0.9, qps4: 0.83, qps5: 0.5, qps6: 0.5}\n )\n backends = [TketSimShotBackend(), TketSimBackend()]\n n_shots_list = [10000, None]\n strats = [\n None,\n PauliPartitionStrat.NonConflictingSets,\n PauliPartitionStrat.CommutingSets,\n ]\n for backend, n_shots in zip(backends, n_shots_list):\n energy = [\n get_operator_expectation_value(\n c,\n op,\n backend,\n n_shots,\n strat,\n GraphColourMethod.LargestFirst,\n seed=3,\n )\n for strat in strats\n ]\n assert np.isclose(energy, [1.3, 1.3, 1.3], atol=0.02).all()\n energy2 = [\n get_operator_expectation_value(\n c, op, backend, n_shots, strat, GraphColourMethod.Lazy, seed=3\n )\n for strat in strats\n ]\n assert np.isclose(\n energy2,\n [1.3, 1.3, 1.3],\n atol=0.02,\n ).all()\n\n\ndef test_inversion_pauli_partition_expectation() -> None:\n c = Circuit(4)\n\n c.H(0)\n c.Vdg(1)\n c.Vdg(2)\n c.Vdg(3)\n\n qb_list = [Qubit(i) for i in range(4)]\n qps1 = QubitPauliString(qb_list, [Pauli.X, Pauli.Y, Pauli.Y, Pauli.Y])\n qps2 = QubitPauliString(qb_list, [Pauli.Y, Pauli.X, Pauli.Y, Pauli.Y])\n qps3 = QubitPauliString(qb_list, [Pauli.Y, Pauli.Y, Pauli.X, Pauli.Y])\n qps4 = QubitPauliString(qb_list, [Pauli.Y, Pauli.Y, Pauli.Y, Pauli.X])\n qps5 = QubitPauliString(qb_list, [Pauli.X, Pauli.X, Pauli.X, Pauli.Y])\n qps6 = QubitPauliString(qb_list, [Pauli.X, Pauli.X, Pauli.Y, Pauli.X])\n qps7 = QubitPauliString(qb_list, [Pauli.X, Pauli.Y, Pauli.X, Pauli.X])\n qps8 = 
QubitPauliString(qb_list, [Pauli.Y, Pauli.X, Pauli.X, Pauli.X])\n op = QubitPauliOperator(\n {\n qps1: 0.1,\n qps2: 0.2,\n qps3: 0.3,\n qps4: 0.4,\n qps5: 0.5,\n qps6: 0.6,\n qps7: 0.7,\n qps8: 0.8,\n }\n )\n backend = TketSimShotBackend()\n n_shots = 10000\n strats = [\n None,\n PauliPartitionStrat.NonConflictingSets,\n PauliPartitionStrat.CommutingSets,\n ]\n energy = [\n get_operator_expectation_value(\n c, op, backend, n_shots, strat, GraphColourMethod.Lazy, seed=54\n )\n for strat in strats\n ]\n assert np.isclose(energy, [0.04248, 0.04248, 0.08612], atol=0.01).all()\n energy2 = [\n get_operator_expectation_value(\n c, op, backend, n_shots, strat, GraphColourMethod.LargestFirst, seed=54\n )\n for strat in strats\n ]\n assert np.isclose(energy2, [0.04248, 0.04248, 0.08612], atol=0.01).all()\n\n\ndef test_compare_statevectors() -> None:\n test_vec = np.array([1 + 2 * 1j, 3 + 4 * 1j, 5 + 6 * 1j, 7 + 8 * 1j])\n other_vec = test_vec + (2 - 1.2 * 1j)\n test_vec /= np.sqrt(np.vdot(test_vec, test_vec)) # type: ignore\n other_vec /= np.sqrt(np.vdot(other_vec, other_vec)) # type: ignore\n\n assert compare_statevectors(test_vec, test_vec)\n assert not compare_statevectors(test_vec, other_vec)\n assert compare_statevectors(other_vec, other_vec)\n phase = np.exp(1j * 0.453)\n assert compare_statevectors(test_vec, phase * test_vec)\n\n\ndef test_dag() -> None:\n c = Circuit(4, 4)\n c.X(0)\n c.H(1)\n c.Rz(0.5, 1)\n c.CX(2, 0)\n c.CRz(0.5, 0, 3)\n c.Measure(3, 3)\n c.Measure(1, 1)\n c.Z(0, condition_bits=[3, 1], condition_value=2)\n c.H(0)\n D = Graph(c)\n Gnx = D.as_nx()\n assert len(Gnx) == 2 * (4 + 4) + c.n_gates\n G = D.get_DAG()\n assert G.directed\n assert G.name == \"Circuit\"\n Gqc = D.get_qubit_graph()\n assert not Gqc.directed\n assert Gqc.name == \"Qubit connectivity\"\n\n\ndef test_dag_implicit_perm() -> None:\n # THET-701\n c = Circuit(3).CX(0, 1).CX(1, 0).CX(1, 2).CX(2, 1)\n Transform.OptimiseCliffords().apply(c)\n assert not all(x == y for x, y in 
c.implicit_qubit_permutation().items())\n G = Graph(c)\n dag = G.get_DAG()\n assert dag.name == \"Circuit\"\n\n\[email protected]\ndef unitary_circuits(draw: Callable[[SearchStrategy[Any]], Any]) -> Circuit:\n # generate example symbolic unitary circuits\n n_qb = draw(strategies.integers(min_value=1, max_value=3))\n # available qubits as integers\n qb_strat = strategies.integers(min_value=0, max_value=n_qb - 1)\n # some symbols to sample from\n syms = symbols(\"a b c d e\") # type: ignore\n c = Circuit(n_qb)\n\n optype_dict = {\n typ: (1, 0)\n for typ in (\n OpType.Z,\n OpType.X,\n OpType.Y,\n OpType.S,\n OpType.Sdg,\n OpType.T,\n OpType.Tdg,\n OpType.V,\n OpType.Vdg,\n OpType.SX,\n OpType.SXdg,\n OpType.H,\n )\n }\n optype_dict.update(\n {typ: (1, 1) for typ in (OpType.Rx, OpType.Rz, OpType.Ry, OpType.U1)}\n )\n optype_dict.update({typ: (1, 2) for typ in (OpType.U2, OpType.PhasedX)})\n optype_dict.update({typ: (1, 3) for typ in (OpType.U3,)})\n\n optype_dict.update(\n {\n typ: (2, 0)\n for typ in (\n OpType.CX,\n OpType.CY,\n OpType.CZ,\n OpType.CH,\n OpType.CV,\n OpType.CVdg,\n OpType.CSX,\n OpType.CSXdg,\n OpType.SWAP,\n OpType.ISWAPMax,\n OpType.Sycamore,\n OpType.ZZMax,\n )\n }\n )\n optype_dict.update(\n {\n typ: (2, 1)\n for typ in (\n OpType.CRz,\n OpType.CRx,\n OpType.CRy,\n OpType.CU1,\n OpType.ISWAP,\n OpType.XXPhase,\n OpType.YYPhase,\n OpType.ZZPhase,\n OpType.ESWAP,\n )\n }\n )\n optype_dict.update({typ: (2, 2) for typ in (OpType.PhasedISWAP, OpType.FSim)})\n optype_dict.update({typ: (2, 3) for typ in (OpType.CU3,)})\n\n optype_dict.update(\n {typ: (3, 0) for typ in (OpType.CCX, OpType.CSWAP, OpType.BRIDGE)}\n )\n\n optype_dict.update({OpType.XXPhase3: (3, 1)})\n\n optype_strat = strategies.sampled_from(list(optype_dict.keys()))\n for _ in range(5):\n typ = draw(optype_strat.filter(lambda x: optype_dict[x][0] <= n_qb))\n params = [\n draw(strategies.sampled_from(syms)) for _ in range(optype_dict[typ][1])\n ]\n qbs = [draw(qb_strat)]\n for _ in 
range(1, optype_dict[typ][0]):\n qbs.append(draw(qb_strat.filter(lambda x: x not in qbs)))\n\n c.add_gate(typ, params, qbs)\n return c\n\n\ndef unitary_from_states(circ: Circuit, back: Backend) -> np.ndarray:\n # use statevector simulation to calculate unitary from all input basis states\n nqb = circ.n_qubits\n matdim = 1 << nqb\n outar = np.zeros((matdim, matdim), dtype=np.complex128)\n\n for i in range(matdim):\n bitstr = f\"{i:0{nqb}b}\"\n basis_circ = Circuit(nqb)\n for qb, val in enumerate(bitstr):\n if val == \"1\":\n basis_circ.X(qb)\n basis_circ.append(circ)\n outar[:, i] = back.run_circuit(basis_circ).get_state()\n\n return outar\n\n\n# this is a _slow_ test, so examples are kept low\n# deadline has to be None because sympy runtime is very unpredictable\n@given(circ=unitary_circuits())\n@settings(\n deadline=None, max_examples=20, suppress_health_check=[HealthCheck.data_too_large]\n)\ndef test_symbolic_conversion(circ: Circuit) -> None:\n\n sym_state = circuit_apply_symbolic_statevector(circ)\n\n sym_unitary = circuit_to_symbolic_unitary(circ)\n\n free_symbs = circ.free_symbols()\n # bind random values to symbolic variables to test numeric equality\n bind_vals = np.random.rand(len(free_symbs))\n\n substitutions = [(sym, val) for sym, val in zip(free_symbs, bind_vals)]\n circ.symbol_substitution(dict(substitutions))\n sym_unitary = sym_unitary.subs(substitutions) # type: ignore\n sym_state = sym_state.subs(substitutions) # type: ignore\n\n numeric_unitary = np.array(sym_unitary).astype(np.complex128)\n numeric_state = np.array(sym_state).astype(np.complex128)\n\n simulated_state_without_optimisation = circ.get_statevector()\n assert np.allclose(\n numeric_state.T, simulated_state_without_optimisation, atol=1e-10\n )\n\n simulated_unitary_without_optimisation = circ.get_unitary()\n assert np.allclose(\n numeric_unitary, simulated_unitary_without_optimisation, atol=1e-10\n )\n\n back = TketSimBackend()\n circ = back.get_compiled_circuit(circ, 1)\n result = 
back.run_circuit(circ)\n simulated_state = result.get_state()\n assert np.allclose(numeric_state.T, simulated_state, atol=1e-10)\n\n simulated_unitary = unitary_from_states(circ, back)\n assert np.allclose(numeric_unitary, simulated_unitary, atol=1e-10)\n\n\nif __name__ == \"__main__\":\n test_append_measurements()\n test_append_measurements_err0()\n test_all_paulis()\n test_shots_to_counts()\n test_counts_to_probs()\n test_state_to_probs()\n test_permute_state()\n test_n_qb_from_statevector_err()\n test_permute_state_err1()\n test_permute_state_err2()\n test_permute_state_err3()\n test_permute_basis_indexing()\n test_shot_expectation()\n test_count_expectation()\n test_small_pauli_partition_expectation()\n test_medium_pauli_partition_expectation()\n test_large_pauli_partition_expectation()\n test_inversion_pauli_partition_expectation()\n test_dag()\n test_dag_implicit_perm()\n"
] | [
[
"numpy.allclose",
"numpy.array_equal",
"numpy.vdot",
"numpy.asarray",
"numpy.arange",
"numpy.fliplr",
"numpy.real",
"numpy.exp",
"numpy.array",
"numpy.zeros",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
r-grewal/data5709 | [
"70b6842a5caf139786fa4e7dae8f4e0a22e98fdd"
] | [
"networks_td3.py"
] | [
"import os\nimport torch as T\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom typing import Tuple\n\nclass ActorNetwork(nn.Module):\n \"\"\"\n Actor network for single GPU.\n\n Methods:\n forward(state):\n Forward propogate states to obtain next action components.\n\n save_checkpoint():\n Saves network parameters.\n \n load_checkpoint():\n Loads network parameters.\n \"\"\"\n\n def __init__(self, inputs_dict: dict, target: bool):\n \"\"\"\n Intialise class varaibles by creating neural network with Adam optimiser.\n\n Parameters:\n inputs_dict: dictionary containing all execution details\n target: whether constructing target network (1) or not (0)\n \"\"\"\n super(ActorNetwork, self).__init__()\n self.input_dims = sum(inputs_dict['input_dims'])\n self.num_actions = int(inputs_dict['num_actions'])\n self.max_action = float(inputs_dict['max_action'])\n\n env_id = str(inputs_dict['env_id'])\n algo_name = str(inputs_dict['algo'])\n loss_type = str(inputs_dict['loss_fn'])\n nn_name = 'actor' if target == 0 else 'actor_target'\n \n fc1_dim = int(inputs_dict['td3_layer_1_units'])\n fc2_dim = int(inputs_dict['td3_layer_2_units'])\n lr_alpha = inputs_dict['td3_actor_learn_rate']\n \n # directory to save network checkpoints\n if not os.path.exists('./models/'+'/'+env_id):\n os.makedirs('./models/'+'/'+env_id)\n self.file_checkpoint = os.path.join('./models/'+'/'+env_id, env_id\n +'--'+algo_name+'_'+loss_type\n +'_'+nn_name)\n\n # network inputs environment space shape\n self.fc1 = nn.Linear(self.input_dims, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.mu = nn.Linear(fc2_dim, self.num_actions)\n\n self.optimizer = optim.Adam(self.parameters(), lr=lr_alpha)\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n\n self.to(self.device)\n\n def forward(self, state: T.FloatTensor) -> T.FloatTensor:\n \"\"\"\n Forward propogation of mini-batch states to obtain next actor actions.\n\n Parameters:\n state: 
current environment states\n\n Returns:\n actions_scaled: next agent actions between -1 and 1 scaled by max action\n \"\"\"\n actions = self.fc1(state)\n actions = F.relu(actions)\n actions = self.fc2(actions)\n actions = F.relu(actions)\n actions_scaled = T.tanh(self.mu(actions)) * self.max_action\n\n return actions_scaled\n\n def save_checkpoint(self):\n T.save(self.state_dict(), self.file_checkpoint)\n\n def load_checkpoint(self):\n self.load_state_dict(T.load(self.file_checkpoint))\n\nclass CriticNetwork(nn.Module):\n \"\"\"\n Critic network for single GPU. \n\n Methods:\n forward(state):\n Forward propogate concatenated state and action to obtain Q-values.\n\n save_checkpoint():\n Saves network parameters.\n \n load_checkpoint():\n Loads network parameters.\n \"\"\"\n \n def __init__(self, inputs_dict: dict, critic: int, target: bool):\n \"\"\"\n Intialise class varaibles by creating neural network with Adam optimiser.\n\n Parameters:\n inputs_dict: dictionary containing all execution details\n critic: number assigned to critic\n target: whether constructing target network (1) or not (0)\n \"\"\"\n super(CriticNetwork, self).__init__()\n self.input_dims = sum(inputs_dict['input_dims'])\n self.num_actions = int(inputs_dict['num_actions'])\n self.max_action = float(inputs_dict['max_action'])\n\n env_id = str(inputs_dict['env_id'])\n algo_name = str(inputs_dict['algo'])\n loss_type = str(inputs_dict['loss_fn'])\n nn_name = 'critic' if target == 0 else 'target_critic'\n nn_name += '_'+str(critic)\n \n fc1_dim = int(inputs_dict['td3_layer_1_units'])\n fc2_dim = int(inputs_dict['td3_layer_2_units'])\n lr_beta = inputs_dict['td3_critic_learn_rate']\n\n # directory to save network checkpoints\n if not os.path.exists('./models/'+'/'+env_id):\n os.makedirs('./models/'+'/'+env_id)\n self.file_checkpoint = os.path.join('./models/'+'/'+env_id, env_id\n +'--'+algo_name+'_'+loss_type\n +'_'+nn_name)\n\n # network inputs environment space shape and number of actions\n 
self.fc1 = nn.Linear(self.input_dims + self.num_actions, fc1_dim)\n self.fc2 = nn.Linear(fc1_dim, fc2_dim)\n self.q = nn.Linear(fc2_dim, 1)\n\n self.optimizer = optim.Adam(self.parameters(), lr=lr_beta)\n self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')\n\n self.to(self.device)\n\n def forward(self, state: T.FloatTensor, action: T.FloatTensor) -> T.FloatTensor:\n \"\"\"\n Forward propogation of mini-batch state-action pairs to obtain Q-value.\n\n Parameters:\n state: current environment states\n action: continuous next actions taken at current states \n\n Returns:\n Q (float): estimated Q action-value\n \"\"\"\n Q_action_value = self.fc1(T.cat([state, action], dim=1))\n Q_action_value = F.relu(Q_action_value)\n Q_action_value = self.fc2(Q_action_value)\n Q_action_value = F.relu(Q_action_value)\n Q = self.q(Q_action_value)\n\n return Q\n\n def save_checkpoint(self):\n T.save(self.state_dict(), self.file_checkpoint)\n\n def load_checkpoint(self):\n self.load_state_dict(T.load(self.file_checkpoint))"
] | [
[
"torch.cat",
"torch.load",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.cuda.is_available"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Layne-Huang/CoaDTI | [
"cf3a33347c709b21c0a330e27041cec2bc2bb600"
] | [
"code/coaDTI.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport numpy as np\r\nfrom torch_geometric.nn import TopKPooling\r\nfrom torch_geometric.nn import global_mean_pool as gap, global_max_pool as gmp\r\nfrom torch_geometric.utils import add_self_loops, degree, remove_self_loops\r\nfrom torch_geometric.nn import MessagePassing\r\nimport torch.nn.functional as F\r\nfrom torch.nn.utils import clip_grad_norm_\r\nimport torch\r\n\r\n\r\nfrom transformers import XLNetModel, BertTokenizer, pipeline, BertModel\r\n\r\nimport math\r\n\r\nembed_dim = 128\r\ndevice = torch.device('cuda')\r\n\r\ndef gelu(x):\r\n out = 1 + torch.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * torch.pow(x, 3)))\r\n return out * x / 2\r\n\r\nclass PositionalEncoding(nn.Module):\r\n def __init__(self, d_model,dropout=0.1, max_len=1000):\r\n super(PositionalEncoding, self).__init__()\r\n self.dropout = nn.Dropout(p=dropout)\r\n pe = torch.zeros(max_len, d_model)\r\n position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\r\n div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0).transpose(0, 1)\r\n self.register_buffer('pe', pe)\r\n\r\n def forward(self, x):\r\n x = x + self.pe[:x.size(0), :]\r\n return self.dropout(x)\r\n\r\nclass LearnedPositionEncoding(nn.Embedding):\r\n def __init__(self,d_model, dropout = 0.1,max_len = 5000):\r\n super().__init__(max_len, d_model)\r\n self.dropout = nn.Dropout(p = dropout)\r\n\r\n def forward(self, x):\r\n weight = self.weight.data.unsqueeze(1)\r\n x = x + weight[:x.size(0), :]\r\n return self.dropout(x)\r\n\r\nclass Att(nn.Module):\r\n def __init__(self, hid_dim, dropout):\r\n super(Att, self).__init__()\r\n\r\n self.linear_v = nn.Linear(hid_dim, hid_dim)\r\n self.linear_k = nn.Linear(hid_dim, hid_dim)\r\n self.linear_q = nn.Linear(hid_dim, 
hid_dim)\r\n self.linear_merge = nn.Linear(hid_dim, hid_dim)\r\n self.hid_dim = hid_dim\r\n self.dropout = dropout\r\n\r\n self.dropout = nn.Dropout(dropout)\r\n\r\n def forward(self, v, k, q, mask):\r\n atted = self.att(v, k, q, mask).transpose(-1,-2)\r\n atted = self.linear_merge(atted)\r\n\r\n return atted\r\n\r\n def att(self, value, key, query, mask):\r\n d_k = query.size(-1)\r\n\r\n scores = torch.matmul(\r\n query, key.transpose(-2, -1)\r\n ) / math.sqrt(d_k)\r\n\r\n if mask is not None:\r\n scores = scores.masked_fill(mask, -1e9)\r\n\r\n att_map = F.softmax(scores, dim=-1)\r\n att_map = self.dropout(att_map)\r\n\r\n return torch.matmul(att_map, value)\r\n\r\n\r\nclass MHAtt(nn.Module):\r\n def __init__(self, hid_dim, n_heads, dropout):\r\n super(MHAtt, self).__init__()\r\n\r\n self.linear_v = nn.Linear(hid_dim, hid_dim)\r\n self.linear_k = nn.Linear(hid_dim, hid_dim)\r\n self.linear_q = nn.Linear(hid_dim, hid_dim)\r\n self.linear_merge = nn.Linear(hid_dim, hid_dim)\r\n self.hid_dim = hid_dim\r\n self.dropout = dropout\r\n self.nhead = n_heads\r\n\r\n self.dropout = nn.Dropout(dropout)\r\n self.hidden_size_head = int(self.hid_dim / self.nhead)\r\n def forward(self, v, k, q, mask):\r\n n_batches = q.size(0)\r\n\r\n v = self.linear_v(v).view(\r\n n_batches,\r\n -1,\r\n self.nhead,\r\n self.hidden_size_head\r\n ).transpose(1, 2)\r\n\r\n k = self.linear_k(k).view(\r\n n_batches,\r\n -1,\r\n self.nhead,\r\n self.hidden_size_head\r\n ).transpose(1, 2)\r\n\r\n q = self.linear_q(q).view(\r\n n_batches,\r\n -1,\r\n self.nhead,\r\n self.hidden_size_head\r\n ).transpose(1, 2)\r\n\r\n atted = self.att(v, k, q, mask)\r\n atted = atted.transpose(1, 2).contiguous().view(\r\n n_batches,\r\n -1,\r\n self.hid_dim\r\n )\r\n\r\n atted = self.linear_merge(atted)\r\n\r\n return atted\r\n\r\n def att(self, value, key, query, mask):\r\n d_k = query.size(-1)\r\n\r\n scores = torch.matmul(\r\n query, key.transpose(-2, -1)\r\n ) / math.sqrt(d_k)\r\n\r\n if mask is not None:\r\n scores 
= scores.masked_fill(mask, -1e9)\r\n\r\n att_map = F.softmax(scores, dim=-1)\r\n att_map = self.dropout(att_map)\r\n\r\n return torch.matmul(att_map, value)\r\n\r\n# class MultiAttn(nn.Module):\r\n# def __init__(self, in_dim, head_num=8):\r\n# super(MultiAttn, self).__init__()\r\n#\r\n# self.head_dim = in_dim // head_num\r\n# self.head_num = head_num\r\n#\r\n# # scaled dot product attention\r\n# self.scale = self.head_dim ** -0.5\r\n#\r\n# self.w_qs = nn.Linear(in_dim, head_num * self.head_dim, bias=True)\r\n# self.w_ks = nn.Linear(in_dim, head_num * self.head_dim, bias=True)\r\n# self.w_vs = nn.Linear(in_dim, head_num * self.head_dim, bias=True)\r\n#\r\n# self.w_os = nn.Linear(head_num * self.head_dim, in_dim, bias=True)\r\n#\r\n# self.gamma = nn.Parameter(torch.FloatTensor([0]))\r\n#\r\n# self.softmax = nn.Softmax(dim=-1)\r\n#\r\n# def forward(self, x, y, attn_mask, non_pad_mask):\r\n# B, L, H = x.size()\r\n# head_num = self.head_num\r\n# head_dim = self.head_dim\r\n#\r\n# q = self.w_qs(y).view(B * head_num, L, head_dim)\r\n# k = self.w_ks(y).view(B * head_num, L, head_dim)\r\n# v = self.w_vs(x).view(B * head_num, L, head_dim)\r\n#\r\n# if attn_mask is not None:\r\n# attn_mask = attn_mask.repeat(head_num, 1, 1)\r\n#\r\n# attn = torch.bmm(q, k.transpose(1, 2)) # B*head_num, L, L\r\n# attn = self.scale * attn\r\n# if attn_mask is not None:\r\n# attn_mask = attn_mask.repeat(head_num, 1, 1)\r\n# attn = attn.masked_fill_(attn_mask, -np.inf)\r\n# attn = self.softmax(attn)\r\n#\r\n# out = torch.bmm(attn, v) # B*head_num, L, head_dim\r\n#\r\n# out = out.view(B, L, head_dim * head_num)\r\n#\r\n# out = self.w_os(out)\r\n#\r\n# if non_pad_mask is not None:\r\n# attn_mask = attn_mask.repeat(head_num, 1, 1)\r\n# out = non_pad_mask * out\r\n#\r\n# out = self.gamma * out + x\r\n#\r\n# return out\r\n\r\nclass DPA(nn.Module):\r\n def __init__(self, hid_dim, n_heads, dropout):\r\n super(DPA, self).__init__()\r\n\r\n self.mhatt1 = MHAtt(hid_dim, n_heads, dropout)\r\n # self.mhatt1 
= MultiAttn(hid_dim, n_heads)\r\n\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.norm1 = nn.LayerNorm(hid_dim)\r\n\r\n\r\n\r\n def forward(self, x, y, y_mask=None):\r\n\r\n # x as V while y as Q and K\r\n # x = self.norm1(x + self.dropout1(\r\n # self.mhatt1(x, x, y, y_mask)\r\n # ))\r\n x = self.norm1(x+self.dropout1(\r\n self.mhatt1(y, y, x, y_mask)\r\n ))\r\n # x = self.norm1(x + self.dropout1(\r\n # self.mhatt1(x, y, y_mask, y_mask)\r\n # ))\r\n\r\n return x\r\n\r\nclass SA(nn.Module):\r\n def __init__(self, hid_dim, n_heads, dropout):\r\n super(SA, self).__init__()\r\n\r\n self.mhatt1 = MHAtt(hid_dim, n_heads, dropout)\r\n # self.mhatt1 = MultiAttn(hid_dim, n_heads)\r\n\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.norm1 = nn.LayerNorm(hid_dim)\r\n\r\n\r\n\r\n def forward(self, x, mask=None):\r\n\r\n x = self.norm1(x + self.dropout1(\r\n self.mhatt1(x, x, x, mask)\r\n ))\r\n # x = self.norm1(x + self.dropout1(\r\n # self.mhatt1(x, x, mask, mask)\r\n # ))\r\n\r\n return x\r\n\r\nclass SEA(nn.Module):\r\n def __init__(self, hid_dim, dropout):\r\n super(SEA, self).__init__()\r\n\r\n self.mhatt1 = Att(hid_dim, dropout)\r\n # self.mhatt1 = MultiAttn(hid_dim, n_heads)\r\n\r\n self.dropout1 = nn.Dropout(dropout)\r\n self.norm1 = nn.LayerNorm(hid_dim)\r\n\r\n\r\n\r\n def forward(self, x, mask=None):\r\n\r\n x = self.norm1(x + self.dropout1(\r\n self.mhatt1(x, x, x, mask)\r\n ))\r\n # x = self.norm1(x + self.dropout1(\r\n # self.mhatt1(x, x, mask, mask)\r\n # ))\r\n\r\n return x\r\n\r\nclass GCNConv(MessagePassing):\r\n def __init__(self, in_channels, out_channels):\r\n super(GCNConv, self).__init__(aggr='add') # \"Add\" aggregation.\r\n self.lin = torch.nn.Linear(in_channels, out_channels)\r\n\r\n def forward(self, x, edge_index):\r\n # x has shape [N, in_channels]\r\n # edge_index has shape [2, E]\r\n\r\n # Step 1: Add self-loops to the adjacency matrix.\r\n edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))\r\n\r\n # Step 2: Linearly transform node 
feature matrix.\r\n x = self.lin(x)\r\n\r\n # Step 3-5: Start propagating messages.\r\n return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)\r\n\r\n def message(self, x_j, edge_index, size):\r\n # x_j has shape [E, out_channels]\r\n\r\n # Step 3: Normalize node features.\r\n row, col = edge_index\r\n deg = degree(row, size[0], dtype=x_j.dtype)\r\n deg_inv_sqrt = deg.pow(-0.5)\r\n norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]\r\n\r\n return norm.view(-1, 1) * x_j\r\n\r\n def update(self, aggr_out):\r\n # aggr_out has shape [N, out_channels]\r\n\r\n # Step 5: Return new node embeddings.\r\n return aggr_out\r\n\r\nclass SAGEConv(MessagePassing):\r\n def __init__(self, in_channels, out_channels):\r\n super(SAGEConv, self).__init__(aggr='max') # \"Max\" aggregation.\r\n self.lin = torch.nn.Linear(in_channels, out_channels)\r\n self.act = torch.nn.ReLU()\r\n self.update_lin = torch.nn.Linear(in_channels + out_channels, in_channels, bias=False)\r\n self.update_act = torch.nn.ReLU()\r\n\r\n def forward(self, x, edge_index):\r\n # x has shape [N, in_channels]\r\n # edge_index has shape [2, E]\r\n a = edge_index\r\n edge_index, _ = remove_self_loops(edge_index)\r\n edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))\r\n\r\n return self.propagate(edge_index, size=(x.size(0), x.size(0)), x=x)\r\n\r\n def message(self, x_j):\r\n # x_j has shape [E, in_channels]\r\n\r\n x_j = self.lin(x_j)\r\n x_j = self.act(x_j)\r\n\r\n return x_j\r\n\r\n def update(self, aggr_out, x):\r\n # aggr_out has shape [N, out_channels]\r\n\r\n new_embedding = torch.cat([aggr_out, x], dim=1)\r\n\r\n new_embedding = self.update_lin(new_embedding)\r\n new_embedding = self.update_act(new_embedding)\r\n\r\n return new_embedding\r\n\r\nclass gnn(nn.Module):\r\n def __init__(self, n_fingerprint, pooling):\r\n super(gnn, self).__init__()\r\n self.pooling = pooling\r\n self.embed_fingerprint = nn.Embedding(num_embeddings=n_fingerprint, embedding_dim=embed_dim)\r\n self.conv1 = 
SAGEConv(embed_dim, 128)\r\n self.pool1 = TopKPooling(128, ratio=0.8)\r\n self.conv2 = SAGEConv(128, 128)\r\n self.pool2 = TopKPooling(128, ratio=0.8)\r\n self.conv3 = SAGEConv(128, 128)\r\n self.pool3 = TopKPooling(128, ratio=0.8)\r\n self.linp1 = torch.nn.Linear(256, 128)\r\n self.linp2 = torch.nn.Linear(128, 512)\r\n\r\n # self.lin1 = torch.nn.Linear(128, 256)\r\n # self.lin2 = torch.nn.Linear(256, 512)\r\n # self.lin3 = torch.nn.Linear(64, 90)\r\n self.lin = torch.nn.Linear(128, 512)\r\n self.bn1 = torch.nn.BatchNorm1d(128)\r\n self.bn2 = torch.nn.BatchNorm1d(64)\r\n self.act1 = torch.nn.ReLU()\r\n self.act2 = torch.nn.ReLU()\r\n\r\n def forward(self, data):\r\n x, edge_index, batch = data.x, data.edge_index, data.batch\r\n x = self.embed_fingerprint(x)\r\n\r\n x = x.squeeze(1)\r\n # print(\"after conv1:\\t\", self.conv1(x, edge_index).shape)# print(type(x))\r\n x = F.relu(self.conv1(x, edge_index))\r\n\r\n if self.pooling:\r\n x, edge_index, _, batch, _, _ = self.pool1(x, edge_index, None, batch)\r\n x1 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\r\n\r\n x = F.relu(self.conv2(x, edge_index))\r\n # print(x.shape)\r\n if self.pooling:\r\n x, edge_index, _, batch, _, _ = self.pool2(x, edge_index, None, batch)\r\n x2 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\r\n\r\n # x = F.relu(self.conv3(x, edge_index))\r\n\r\n if self.pooling:\r\n x, edge_index, _, batch, _, _ = self.pool3(x, edge_index, None, batch)\r\n x3 = torch.cat([gmp(x, batch), gap(x, batch)], dim=1)\r\n\r\n x = x1 + x2 + x3\r\n x = self.linp1(x)\r\n x = self.act1(x)\r\n x = self.linp2(x)\r\n if not self.pooling:\r\n # x = self.lin1(x)\r\n # x = self.act1(x)\r\n # x = self.lin2(x)\r\n x = self.lin(x)\r\n # x = self.lin(x)\r\n # x = self.act2(x)\r\n # x = self.lin3(x)\r\n # x = F.dropout(x, p=0.5, training=self.training)\r\n\r\n return x\r\n\r\nclass stack_cross_att(nn.Module):\r\n def __init__(self, dim, nhead, dropout):\r\n super(stack_cross_att, self).__init__()\r\n self.sca = SA(dim, 
nhead, dropout)\r\n self.spa = SA(dim, nhead, dropout)\r\n self.coa_cp = DPA(dim, nhead, dropout)\r\n\r\n def forward(self, protein_vector, compound_vector):\r\n compound_vector = self.sca(compound_vector, None) # self-attention\r\n protein_vector = self.spa(protein_vector, None) # self-attention\r\n protein_covector = self.coa_cp(protein_vector, compound_vector, None) # co-attention\r\n\r\n return protein_covector, compound_vector\r\n\r\nclass encoder_cross_att(nn.Module):\r\n def __init__(self, dim, nhead, dropout, layers):\r\n super(encoder_cross_att, self).__init__()\r\n # self.encoder_layers = nn.ModuleList([SEA(dim, dropout) for _ in range(layers)])\r\n self.encoder_layers = nn.ModuleList([SA(dim, nhead, dropout) for _ in range(layers)])\r\n self.decoder_sa = nn.ModuleList([SA(dim, nhead, dropout) for _ in range(layers)])\r\n self.decoder_coa = nn.ModuleList([DPA(dim, nhead, dropout) for _ in range(layers)])\r\n self.layer_coa = layers\r\n def forward(self, protein_vector, compound_vector):\r\n for i in range(self.layer_coa):\r\n compound_vector = self.encoder_layers[i](compound_vector, None) # self-attention\r\n for i in range(self.layer_coa):\r\n protein_vector = self.decoder_sa[i](protein_vector, None)\r\n protein_vector = self.decoder_coa[i](protein_vector, compound_vector, None)# co-attention\r\n\r\n return protein_vector, compound_vector\r\n\r\nclass inter_cross_att(nn.Module):\r\n def __init__(self, dim, nhead, dropout):\r\n super(inter_cross_att, self).__init__()\r\n self.sca = SA(dim, nhead, dropout)\r\n self.spa = SA(dim, nhead, dropout)\r\n self.coa_pc = DPA(dim, nhead, dropout)\r\n self.coa_cp = DPA(dim, nhead, dropout)\r\n\r\n def forward(self, protein_vector, compound_vector):\r\n compound_vector = self.sca(compound_vector, None) # self-attention\r\n protein_vector = self.spa(protein_vector, None) # self-attention\r\n compound_covector = self.coa_pc(compound_vector, protein_vector, None) # co-attention\r\n protein_covector = 
self.coa_cp(protein_vector, compound_vector, None) # co-attention\r\n\r\n return protein_covector, compound_covector\r\n\r\n\r\nclass Dtis(nn.Module):\r\n def __init__(self, n_fingerprint, dim, n_word, layer_output, layer_coa,\r\n d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, co_attention=False, gcn_pooling =False):\r\n super(Dtis, self).__init__()\r\n\r\n self.co_attention = co_attention\r\n # embedding layer\r\n self.embed_word = nn.Embedding(n_word, dim) # we do not use additional embedding if we use w2v\r\n self.pos_encoder = PositionalEncoding(d_model=512) #if w2v d_model=100 else:512\r\n # self.pos_encoder = LearnedPositionEncoding(d_model=512)\r\n # feature extraction layer\r\n\r\n self.gnn = gnn(n_fingerprint, gcn_pooling)\r\n prot_encoder_layer = nn.TransformerEncoderLayer(d_model,nhead,dim_feedforward,dropout)\r\n encoder_norm = nn.LayerNorm(d_model)\r\n self.prot_encoder = nn.TransformerEncoder(prot_encoder_layer, num_encoder_layers, encoder_norm)\r\n # self.fc = nn.Linear(100, d_model)\r\n\r\n # attention layers\r\n self.layer_coa = layer_coa\r\n # self.encoder_coa_layers = nn.ModuleList([encoder_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n self.encoder_coa_layers = encoder_cross_att(dim, nhead, dropout, layer_coa)\r\n self.inter_coa_layers = nn.ModuleList([inter_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n self.stack_coa_layers = nn.ModuleList([stack_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n\r\n\r\n # output layers\r\n self.layer_output = layer_output\r\n # self.W_out = nn.ModuleList([nn.Linear(2 * dim, 2 * dim)\r\n # for _ in range(layer_output)])\r\n\r\n self.W_out = nn.ModuleList([nn.Linear(2 * dim, dim),nn.Linear(dim, 128),nn.Linear(128, 64)\r\n ])\r\n\r\n self.W_interaction = nn.Linear(64, 2)\r\n\r\n # self._init_weight()\r\n\r\n\r\n def forward(self, inputs, proteins):\r\n\r\n \"\"\"Compound vector with GNN.\"\"\"\r\n compound_vector = self.gnn(inputs)\r\n # 
compound_vector = torch.unsqueeze(compound_vector, 0) #no-radius\r\n\r\n \"\"\"Protein vector with attention-CNN.\"\"\"\r\n # proteins = torch.unsqueeze(proteins, 0)\r\n protein_vector = self.embed_word(proteins)\r\n # protein_vector = torch.unsqueeze(protein_vector, 0)\r\n protein_vector = self.pos_encoder(protein_vector)\r\n # protein_vector = self.fc(protein_vector) #w2v\r\n protein_vector = self.prot_encoder(protein_vector)\r\n # protein_vector = self.fc(protein_vector)\r\n\r\n\r\n compound_vector = compound_vector.unsqueeze(0)\r\n if 'encoder' in self.co_attention:\r\n protein_vector, compound_vector = self.encoder_coa_layers(protein_vector, compound_vector)\r\n elif 'stack' in self.co_attention:\r\n for i in range(self.layer_coa):\r\n protein_vector, compound_vector = self.stack_coa_layers[i](protein_vector, compound_vector)\r\n else:\r\n for i in range(self.layer_coa):\r\n protein_vector, compound_vector = self.inter_coa_layers[i](protein_vector, compound_vector)\r\n\r\n\r\n protein_vector = protein_vector.mean(dim=1)\r\n # compound_vector = compound_vector.squeeze(1) #batch\r\n compound_vector = compound_vector.mean(dim=1)\r\n \"\"\"Concatenate the above two vectors and output the interaction.\"\"\"\r\n # catenate the two vectors\r\n cat_vector = torch.cat((compound_vector, protein_vector), 1)\r\n\r\n # sumarise the two vectors\r\n # cat_vector = compound_vector+protein_vector\r\n for j in range(self.layer_output):\r\n cat_vector = torch.tanh(self.W_out[j](cat_vector))\r\n interaction = self.W_interaction(cat_vector)\r\n\r\n return interaction\r\n\r\n def __call__(self, data, proteins, train=True):\r\n\r\n # inputs = data.x, data.edge_index, data.protein\r\n correct_interaction = data.y\r\n\r\n predicted_interaction = self.forward(data, proteins)\r\n\r\n if train:\r\n criterion = torch.nn.CrossEntropyLoss().to(device)\r\n loss = criterion(predicted_interaction, correct_interaction)\r\n return loss, predicted_interaction\r\n else:\r\n correct_labels = 
correct_interaction.to('cpu').data.numpy()\r\n ys = F.softmax(predicted_interaction, 1).to('cpu').data.numpy()\r\n predicted_labels = list(map(lambda x: np.argmax(x), ys))\r\n predicted_scores = list(map(lambda x: x[1], ys))\r\n return correct_labels, predicted_labels, predicted_scores\r\n\r\nclass Dtis_ablation(nn.Module):\r\n def __init__(self, n_fingerprint, dim, n_word, layer_output, layer_coa,\r\n d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, gcn_pooling =False):\r\n super(Dtis_ablation, self).__init__()\r\n\r\n # embedding layer\r\n self.embed_word = nn.Embedding(n_word, dim) # we do not use additional embedding if we use w2v\r\n self.pos_encoder = PositionalEncoding(d_model=512) #if w2v d_model=100 else:512\r\n # self.pos_encoder = LearnedPositionEncoding(d_model=512)\r\n # feature extraction layer\r\n\r\n self.gnn = gnn(n_fingerprint, gcn_pooling)\r\n prot_encoder_layer = nn.TransformerEncoderLayer(d_model,nhead,dim_feedforward,dropout)\r\n encoder_norm = nn.LayerNorm(d_model)\r\n self.prot_encoder = nn.TransformerEncoder(prot_encoder_layer, num_encoder_layers, encoder_norm)\r\n # self.fc = nn.Linear(100, d_model)\r\n\r\n # attention layers\r\n self.layer_coa = layer_coa\r\n # self.encoder_coa_layers = encoder_cross_att(dim, nhead, dropout, layer_coa)\r\n # self.inter_coa_layers = nn.ModuleList([inter_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n # self.stack_coa_layers = nn.ModuleList([stack_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n\r\n\r\n # output layers\r\n self.layer_output = layer_output\r\n # self.W_out = nn.ModuleList([nn.Linear(2 * dim, 2 * dim)\r\n # for _ in range(layer_output)])\r\n\r\n self.W_out = nn.ModuleList([nn.Linear(2 * dim, dim),nn.Linear(dim, 128),nn.Linear(128, 64)\r\n ])\r\n\r\n self.W_interaction = nn.Linear(64, 2)\r\n\r\n # self._init_weight()\r\n\r\n\r\n def forward(self, inputs, proteins):\r\n\r\n \"\"\"Compound vector with GNN.\"\"\"\r\n compound_vector = 
self.gnn(inputs)\r\n # compound_vector = torch.unsqueeze(compound_vector, 0) #no-radius\r\n\r\n \"\"\"Protein vector with attention-CNN.\"\"\"\r\n # proteins = torch.unsqueeze(proteins, 0)\r\n protein_vector = self.embed_word(proteins)\r\n # protein_vector = torch.unsqueeze(protein_vector, 0)\r\n protein_vector = self.pos_encoder(protein_vector)\r\n # protein_vector = self.fc(protein_vector) #w2v\r\n protein_vector = self.prot_encoder(protein_vector)\r\n # protein_vector = self.fc(protein_vector)\r\n\r\n\r\n compound_vector = compound_vector.unsqueeze(0)\r\n\r\n\r\n\r\n protein_vector = protein_vector.mean(dim=1)\r\n compound_vector = compound_vector.mean(dim=1)\r\n \"\"\"Concatenate the above two vectors and output the interaction.\"\"\"\r\n # catenate the two vectors\r\n cat_vector = torch.cat((compound_vector, protein_vector), 1)\r\n\r\n # sumarise the two vectors\r\n # cat_vector = compound_vector+protein_vector\r\n for j in range(self.layer_output):\r\n cat_vector = torch.tanh(self.W_out[j](cat_vector))\r\n interaction = self.W_interaction(cat_vector)\r\n # print(interaction)\r\n # print(F.softmax(interaction, 1).to('cpu').data.numpy())\r\n # exit()\r\n return interaction\r\n\r\n def __call__(self, data, proteins, train=True):\r\n\r\n # inputs = data.x, data.edge_index, data.protein\r\n correct_interaction = data.y\r\n\r\n predicted_interaction = self.forward(data, proteins)\r\n\r\n if train:\r\n criterion = torch.nn.CrossEntropyLoss().to(device)\r\n # correct_interaction = torch.tensor(correct_interaction, dtype=torch.long)\r\n loss = criterion(predicted_interaction, correct_interaction)\r\n return loss, predicted_interaction\r\n else:\r\n correct_labels = correct_interaction.to('cpu').data.numpy()\r\n ys = F.softmax(predicted_interaction, 1).to('cpu').data.numpy()\r\n predicted_labels = list(map(lambda x: np.argmax(x), ys))\r\n predicted_scores = list(map(lambda x: x[1], ys))\r\n return correct_labels, predicted_labels, predicted_scores\r\n\r\nclass 
Dtis_ablation_trans(nn.Module):\r\n def __init__(self, n_fingerprint, dim, n_word, layer_output, layer_coa,\r\n d_model=512, nhead=8, num_encoder_layers=6, dim_feedforward=2048, dropout=0.1, co_attention=False, gcn_pooling=False):\r\n super(Dtis_ablation_trans, self).__init__()\r\n\r\n self.co_attention = co_attention\r\n # embedding layer\r\n self.embed_word = nn.Embedding(n_word, dim) # we do not use additional embedding if we use w2v\r\n self.pos_encoder = PositionalEncoding(d_model=512) #if w2v d_model=100 else:512\r\n # self.pos_encoder = LearnedPositionEncoding(d_model=512)\r\n # feature extraction layer\r\n\r\n self.gnn = gnn(n_fingerprint, gcn_pooling)\r\n prot_encoder_layer = nn.TransformerEncoderLayer(d_model,nhead,dim_feedforward,dropout)\r\n encoder_norm = nn.LayerNorm(d_model)\r\n self.protein_encoder = nn.TransformerEncoder(prot_encoder_layer, num_encoder_layers, encoder_norm)\r\n # self.fc = nn.Linear(100, d_model)\r\n\r\n # attention layers\r\n self.layer_coa = layer_coa\r\n # self.encoder_coa_layers = encoder_cross_att(dim, nhead, dropout, layer_coa)\r\n self.inter_coa_layers = nn.ModuleList([inter_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n # self.stack_coa_layers = nn.ModuleList([stack_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n\r\n\r\n # output layers\r\n self.layer_output = layer_output\r\n # self.W_out = nn.ModuleList([nn.Linear(2 * dim, 2 * dim)\r\n # for _ in range(layer_output)])\r\n\r\n self.W_out = nn.ModuleList([nn.Linear(2 * dim, dim),nn.Linear(dim, 128),nn.Linear(128, 64)\r\n ])\r\n\r\n self.W_interaction = nn.Linear(64, 2)\r\n\r\n # self._init_weight()\r\n\r\n\r\n def forward(self, inputs, proteins):\r\n\r\n \"\"\"Compound vector with GNN.\"\"\"\r\n compound_vector = self.gnn(inputs)\r\n # compound_vector = torch.unsqueeze(compound_vector, 0) #no-radius\r\n\r\n \"\"\"Protein vector with attention-CNN.\"\"\"\r\n # proteins = torch.unsqueeze(proteins, 0)\r\n protein_vector = 
self.embed_word(proteins)\r\n # protein_vector = torch.unsqueeze(protein_vector, 0)\r\n # protein_vector = self.pos_encoder(protein_vector)\r\n # protein_vector = self.fc(protein_vector) #w2v\r\n # protein_vector = self.encoder(protein_vector)\r\n # protein_vector = self.fc(protein_vector)\r\n\r\n\r\n compound_vector = compound_vector.unsqueeze(0)\r\n\r\n if 'encoder' in self.co_attention:\r\n protein_vector, compound_vector = self.encoder_coa_layers(protein_vector, compound_vector)\r\n elif 'stack' in self.co_attention:\r\n for i in range(self.layer_coa):\r\n protein_vector, compound_vector = self.stack_coa_layers[i](protein_vector, compound_vector)\r\n else:\r\n for i in range(self.layer_coa):\r\n protein_vector, compound_vector = self.inter_coa_layers[i](protein_vector, compound_vector)\r\n\r\n protein_vector = protein_vector.mean(dim=1)\r\n compound_vector = compound_vector.mean(dim=1)\r\n \"\"\"Concatenate the above two vectors and output the interaction.\"\"\"\r\n # catenate the two vectors\r\n cat_vector = torch.cat((compound_vector, protein_vector), 1)\r\n\r\n # sumarise the two vectors\r\n # cat_vector = compound_vector+protein_vector\r\n for j in range(self.layer_output):\r\n cat_vector = torch.tanh(self.W_out[j](cat_vector))\r\n interaction = self.W_interaction(cat_vector)\r\n # print(interaction)\r\n # print(F.softmax(interaction, 1).to('cpu').data.numpy())\r\n # exit()\r\n return interaction\r\n\r\n def __call__(self, data, proteins, train=True):\r\n\r\n # inputs = data.x, data.edge_index, data.protein\r\n correct_interaction = data.y\r\n\r\n predicted_interaction = self.forward(data, proteins)\r\n\r\n if train:\r\n criterion = torch.nn.CrossEntropyLoss().to(device)\r\n # correct_interaction = torch.tensor(correct_interaction, dtype=torch.long)\r\n loss = criterion(predicted_interaction, correct_interaction)\r\n return loss, predicted_interaction\r\n else:\r\n correct_labels = correct_interaction.to('cpu').data.numpy()\r\n ys = 
F.softmax(predicted_interaction, 1).to('cpu').data.numpy()\r\n predicted_labels = list(map(lambda x: np.argmax(x), ys))\r\n predicted_scores = list(map(lambda x: x[1], ys))\r\n return correct_labels, predicted_labels, predicted_scores\r\n\r\nclass Dtis_ablation_gnn(nn.Module):\r\n def __init__(self, n_fingerprint, dim, n_word, layer_output, layer_coa,\r\n d_model=512, nhead=8, num_encoder_layers=3, dim_feedforward=1024, dropout=0.1, co_attention=False):\r\n super(Dtis_ablation_gnn, self).__init__()\r\n\r\n self.co_attention = co_attention\r\n # embedding layer\r\n self.embed_word = nn.Embedding(n_word, dim) # we do not use additional embedding if we use w2v\r\n self.embed_fingerprint = nn.Embedding(num_embeddings=n_fingerprint, embedding_dim=dim)\r\n self.pos_encoder = PositionalEncoding(d_model=512) #if w2v d_model=100 else:512\r\n\r\n\r\n prot_encoder_layer = nn.TransformerEncoderLayer(d_model,nhead,dim_feedforward,dropout)\r\n encoder_norm = nn.LayerNorm(d_model)\r\n self.protein_encoder = nn.TransformerEncoder(prot_encoder_layer, num_encoder_layers, encoder_norm)\r\n\r\n # attention layers\r\n self.layer_coa = layer_coa\r\n # self.encoder_coa_layers = encoder_cross_att(dim, nhead, dropout, layer_coa)\r\n self.inter_coa_layers = nn.ModuleList([inter_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n # self.stack_coa_layers = nn.ModuleList([stack_cross_att(dim, nhead, dropout) for _ in range(layer_coa)])\r\n\r\n\r\n # output layers\r\n self.layer_output = layer_output\r\n self.W_out = nn.ModuleList([nn.Linear(2 * dim, dim),nn.Linear(dim, 128),nn.Linear(128, 64)\r\n ])\r\n\r\n self.W_interaction = nn.Linear(64, 2)\r\n\r\n\r\n\r\n def forward(self, inputs, proteins):\r\n\r\n \"\"\"Compound vector with GNN.\"\"\"\r\n compound_vector = self.embed_fingerprint(inputs.x)\r\n # compound_vector = torch.unsqueeze(compound_vector, 0) #no-radius\r\n\r\n \"\"\"Protein vector with attention-CNN.\"\"\"\r\n # proteins = torch.unsqueeze(proteins, 0)\r\n protein_vector 
= self.embed_word(proteins)\r\n protein_vector = self.pos_encoder(protein_vector)\r\n protein_vector = self.protein_encoder(protein_vector)\r\n\r\n\r\n compound_vector = compound_vector.unsqueeze(0)\r\n compound_vector = compound_vector.squeeze(2)\r\n\r\n if 'encoder' in self.co_attention:\r\n protein_vector, compound_vector = self.encoder_coa_layers(protein_vector, compound_vector)\r\n elif 'stack' in self.co_attention:\r\n for i in range(self.layer_coa):\r\n protein_vector, compound_vector = self.stack_coa_layers[i](protein_vector, compound_vector)\r\n else:\r\n for i in range(self.layer_coa):\r\n protein_vector, compound_vector = self.inter_coa_layers[i](protein_vector, compound_vector)\r\n\r\n protein_vector = protein_vector.mean(dim=1)\r\n compound_vector = compound_vector.mean(dim=1)\r\n \"\"\"Concatenate the above two vectors and output the interaction.\"\"\"\r\n # catenate the two vectors\r\n cat_vector = torch.cat((compound_vector, protein_vector), 1)\r\n\r\n # sumarise the two vectors\r\n # cat_vector = compound_vector+protein_vector\r\n for j in range(self.layer_output):\r\n cat_vector = torch.tanh(self.W_out[j](cat_vector))\r\n interaction = self.W_interaction(cat_vector)\r\n # print(interaction)\r\n # print(F.softmax(interaction, 1).to('cpu').data.numpy())\r\n # exit()\r\n return interaction\r\n\r\n def __call__(self, data, proteins, train=True):\r\n\r\n # inputs = data.x, data.edge_index, data.protein\r\n correct_interaction = data.y\r\n\r\n predicted_interaction = self.forward(data, proteins)\r\n\r\n if train:\r\n criterion = torch.nn.CrossEntropyLoss().to(device)\r\n # correct_interaction = torch.tensor(correct_interaction, dtype=torch.long)\r\n loss = criterion(predicted_interaction, correct_interaction)\r\n return loss, predicted_interaction\r\n else:\r\n correct_labels = correct_interaction.to('cpu').data.numpy()\r\n ys = F.softmax(predicted_interaction, 1).to('cpu').data.numpy()\r\n predicted_labels = list(map(lambda x: np.argmax(x), ys))\r\n 
predicted_scores = list(map(lambda x: x[1], ys))\r\n return correct_labels, predicted_labels, predicted_scores"
] | [
[
"torch.nn.functional.softmax",
"numpy.sqrt",
"torch.sin",
"torch.zeros",
"torch.cat",
"torch.nn.Embedding",
"torch.device",
"torch.pow",
"torch.nn.Dropout",
"torch.nn.CrossEntropyLoss",
"numpy.argmax",
"torch.nn.TransformerEncoder",
"torch.arange",
"torch.cos",
"torch.nn.BatchNorm1d",
"torch.nn.Linear",
"torch.nn.TransformerEncoderLayer",
"torch.nn.LayerNorm",
"torch.matmul",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rivernuthead/active_width_analysis | [
"fffd6c02941545cd32380453039bc567336baba0"
] | [
"active_width_analysis_v1.0.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Jun 7 19:05:57 2021\n\n@author: erri\n\n\nQuesto script analizza le immagini di differenza di saturazione e calcola,\nimpostate le soglie per di attività per monte e valle, il rapporto W_Active/W\nnello spazio e nel tempo \n\"\"\"\n# Import libraries\nimport os\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Set working directory and run parameters\nrun = 'q20_2rgm2'\ndt = 1 # set dt between photos [min]\nchart_name = 'Q=2.0 l/s'\nw_dir = os.getcwd() # Set Python script location as w_dir\npath_in = os.path.join(w_dir, 'input_images', run)\npath_out = os.path.join(w_dir, 'output_images', run)\n\n# Set directory\nif os.path.exists(path_in):\n pass\nelse:\n os.mkdir(path_in)\n \nif os.path.exists(path_out):\n pass\nelse:\n os.mkdir(path_out)\n\n# List file in input images directory\nfilenames = os.listdir(path_in)\n\n# Set differencing value treshold \nthr_up = 8\nthr_dwn = 14\n\n#Set flume Width [mm]\nW = 600\n\n# Set pixel dimension [mm]\npx = 0.96\n\nactive_W = []\nactive_W_percentuale = []\n\nfig1, ax1 = plt.subplots()\nfig1.set_dpi(300)\nax1.set_title('Wactive/W [-]'+ chart_name) #'+run)\nax1.set_xlabel('Coordinata longitudinale [m]')\nax1.set_ylim(0,1.1)\nax1.set_ylabel('Wactive/W [-]')\n\nfor filename in sorted(filenames):\n path = os.path.join(path_in, filename) # Build path\n if os.path.isfile(path): # If filename is a file (and not a folder)\n img = Image.open(path) # Set image\n np_img = np.array(img) # Convert image in np.array\n # Set different threshold for upstream and downstraem images\n if filename.endswith('cropped0.png'): # Upstream image\n thr = thr_up\n elif filename.endswith('cropped1.png'): # Downstream image\n thr = thr_dwn\n active_img = np_img>=thr # Create map with values >= thr\n dim_x, dim_y = np_img.shape\n cross_section = np.zeros(dim_y) # Initialize vector\n \n for i in range (0,dim_y):\n 
cross_section[i]=np.count_nonzero(active_img[:,i])\n #print(np.mean(cross_section))\n active_W = np.append(active_W, np.mean(cross_section)) \n active_W_perc = (np.mean(cross_section)*px/W)*100\n active_W_percentuale = np.append(active_W_percentuale, active_W_perc)\n print(filename, np_img.shape, 'threshold=',thr, 'Active_W=', f\"{np.mean(cross_section)*px:.3f}\", 'Active_W%', active_W_perc)\n # print(active_W_perc)\n X = np.linspace(0, dim_y, dim_y)\n \n ax1.plot(X*px/1000, cross_section*px/W, lw=0.1)\n\n\n#Plot Active Width % \nT = np.linspace(0, dt*len(active_W) , len(active_W)) # Time vector\n \nfig2, ax = plt.subplots()\nfig2.set_dpi(300)\nax.set_title('Wactive/W [-]'+ chart_name) #'+run)\nax.set_xlabel('Tempo [min]')\nax.set_ylabel('Wactive/W ')\nax.plot(T[10:-10], active_W_percentuale[10:-10]/100, marker=\"o\", markersize=2)\n\nprint('Active_W%_Mean=',np.mean(active_W_percentuale[10:-10]))"
] | [
[
"numpy.linspace",
"matplotlib.pyplot.subplots",
"numpy.append",
"numpy.mean",
"numpy.count_nonzero",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pymontecarlo/pymontecarlo-gui | [
"1b3c37d4b634a85c63f23d27ea8bd79bf5a43a2f"
] | [
"pymontecarlo_gui/widgets/lineedit.py"
] | [
"\"\"\"\"\"\"\n\n# Standard library modules.\nimport re\nimport math\n\n# Third party modules.\nfrom qtpy import QtWidgets, QtGui, QtCore\n\nimport numpy as np\n\n# Local modules.\nfrom pymontecarlo_gui.util.validate import (\n ValidableBase,\n VALID_BACKGROUND_STYLESHEET,\n INVALID_BACKGROUND_STYLESHEET,\n)\n\n# Globals and constants variables.\n\n\nclass DoubleValidatorAdapterMixin:\n def _get_double_validator(self): # pragma: no cover\n raise NotImplementedError\n\n def bottom(self):\n return self._get_double_validator().bottom()\n\n def setBottom(self, bottom):\n self._get_double_validator().setBottom(bottom)\n\n def decimals(self):\n return self._get_double_validator().decimals()\n\n def setDecimals(self, decimals):\n self._get_double_validator().setDecimals(decimals)\n\n def range(self):\n return self._get_double_validator().range()\n\n def setRange(self, bottom, top, decimals=0):\n self._get_double_validator().setRange(bottom, top, decimals)\n\n def top(self):\n return self._get_double_validator().top()\n\n def setTop(self, top):\n self._get_double_validator().setTop(top)\n\n\nclass LineEditAdapterMixin:\n def _get_lineedit(self): # pragma: no cover\n raise NotImplementedError\n\n def keyPressEvent(self, event):\n self._get_lineedit().keyPressEvent(event)\n\n def keyReleaseEvent(self, event):\n self._get_lineedit().keyReleaseEvent(event)\n\n def clear(self):\n self._get_lineedit().clear()\n\n def hasAcceptableInput(self):\n return self._get_lineedit().hasAcceptableInput()\n\n\nclass ColoredLineEdit(QtWidgets.QLineEdit, ValidableBase):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Signals\n self.textChanged.connect(self._on_text_changed)\n\n def _on_text_changed(self, text):\n if not self.isEnabled():\n self.setStyleSheet(VALID_BACKGROUND_STYLESHEET)\n return\n\n if self.hasAcceptableInput():\n self.setStyleSheet(VALID_BACKGROUND_STYLESHEET)\n else:\n self.setStyleSheet(INVALID_BACKGROUND_STYLESHEET)\n\n def isValid(self):\n 
return super().isValid() and self.hasAcceptableInput()\n\n def setEnabled(self, enabled):\n super().setEnabled(enabled)\n self._on_text_changed(self.text())\n\n def setValidator(self, validator):\n super().setValidator(validator)\n self._on_text_changed(self.text())\n\n\nclass ColoredFloatLineEdit(\n QtWidgets.QWidget, LineEditAdapterMixin, DoubleValidatorAdapterMixin, ValidableBase\n):\n\n valueChanged = QtCore.Signal(float)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n # Widgets\n self.lineedit = ColoredLineEdit()\n self.lineedit.setValidator(QtGui.QDoubleValidator())\n self._update_tooltip()\n\n # Layouts\n layout = QtWidgets.QHBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.lineedit)\n self.setLayout(layout)\n\n # Signals\n self.lineedit.textChanged.connect(self._on_text_changed)\n self.lineedit.validator().changed.connect(self._on_validator_changed)\n\n def _update_tooltip(self):\n locale = QtCore.QLocale.system()\n precision = self.decimals()\n tooltip = \"Value must be between [{}, {}]\".format(\n locale.toString(self.bottom(), \"f\", precision),\n locale.toString(self.top(), \"f\", precision),\n )\n self.lineedit.setToolTip(tooltip)\n self.setToolTip(tooltip)\n\n def _on_text_changed(self, *args):\n self.valueChanged.emit(self.value())\n\n def _on_validator_changed(self, *args):\n self._update_tooltip()\n self.setValue(self.value())\n\n def _get_double_validator(self):\n return self.lineedit.validator()\n\n def _get_lineedit(self):\n return self.lineedit\n\n def isValid(self):\n if not super().isValid():\n return False\n\n if not self.lineedit.isValid():\n return False\n\n locale = QtCore.QLocale.system()\n _value, ok = locale.toDouble(self.lineedit.text())\n if not ok:\n return False\n\n return True\n\n def value(self):\n locale = QtCore.QLocale.system()\n value, ok = locale.toDouble(self.lineedit.text())\n if not ok:\n return float(\"nan\")\n else:\n return value\n\n def setValue(self, value):\n locale = 
QtCore.QLocale.system()\n precision = self.decimals()\n if precision == 0:\n if not math.isfinite(value):\n value = 0\n value = int(value)\n text = locale.toString(value)\n else:\n value = float(value)\n text = locale.toString(value, \"f\", precision)\n self.lineedit.setText(text)\n\n def setEnabled(self, enabled):\n super().setEnabled(enabled)\n self.lineedit.setEnabled(enabled)\n\n\nMULTIFLOAT_SEPARATOR = \";\"\nMULTIFLOAT_PATTERN = r\"(?P<start>inf|[\\de\\.+\\-\\,]*)(?:\\:(?P<stop>[\\de\\.+\\-\\,]*))?(?:\\:(?P<step>[\\de\\.+\\-\\,]*))?\"\n\n\ndef parse_multifloat_text(text):\n locale = QtCore.QLocale.system()\n\n values = []\n\n for part in text.split(MULTIFLOAT_SEPARATOR):\n part = part.strip()\n if not part:\n continue\n\n match = re.match(MULTIFLOAT_PATTERN, part)\n if not match:\n raise ValueError(\"Invalid part: %s\" % part)\n\n start, _ok = locale.toDouble(match.group(\"start\"))\n\n stop = match.group(\"stop\")\n if stop is None:\n stop = start + 1\n else:\n stop, _ok = locale.toDouble(stop)\n\n step = match.group(\"step\")\n if step is None:\n step = 1\n else:\n step, _ok = locale.toDouble(step)\n\n if math.isinf(start):\n values.append(start)\n else:\n values.extend(np.arange(start, stop, step))\n\n return tuple(sorted(set(values)))\n\n\nclass MultiFloatValidator(QtGui.QValidator, DoubleValidatorAdapterMixin):\n def __init__(self):\n super().__init__()\n\n # Variables\n expr = QtCore.QRegularExpression(r\"^[\\de\\-.,+:;]+$\")\n self.validator_text = QtGui.QRegularExpressionValidator(expr)\n self.validator_value = QtGui.QDoubleValidator()\n\n # Signals\n self.validator_text.changed.connect(self.changed)\n self.validator_value.changed.connect(self.changed)\n\n def validate(self, input, pos):\n if not input:\n return QtGui.QValidator.Intermediate, input, pos\n\n state, input, pos = self.validator_text.validate(input, pos)\n if state == QtGui.QValidator.Invalid:\n return state, input, pos\n\n try:\n values = parse_multifloat_text(input)\n except:\n return 
QtGui.QValidator.Intermediate, input, pos\n\n for value in values:\n if self.decimals() == 0:\n text = str(int(value))\n else:\n locale = QtCore.QLocale.system()\n text = locale.toString(value, \"g\", self.decimals())\n state, _, _ = self.validator_value.validate(text, pos)\n if state != QtGui.QValidator.Acceptable:\n return state, input, pos\n\n return QtGui.QValidator.Acceptable, input, pos\n\n def _get_double_validator(self):\n return self.validator_value\n\n\nclass ColoredMultiFloatLineEdit(\n QtWidgets.QWidget, LineEditAdapterMixin, DoubleValidatorAdapterMixin, ValidableBase\n):\n\n valuesChanged = QtCore.Signal(tuple)\n\n def __init__(self, parent=None):\n super().__init__(parent)\n\n # Widgets\n self.lineedit = ColoredLineEdit()\n self.lineedit.setValidator(MultiFloatValidator())\n self._update_tooltip()\n\n # Layouts\n layout = QtWidgets.QHBoxLayout()\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.lineedit)\n self.setLayout(layout)\n\n # Signals\n self.lineedit.textChanged.connect(self._on_text_changed)\n self.lineedit.validator().changed.connect(self._on_validator_changed)\n\n def _update_tooltip(self):\n locale = QtCore.QLocale.system()\n precision = self.decimals()\n tooltip = \"Value(s) must be between [{}, {}]\".format(\n locale.toString(self.bottom(), \"f\", precision),\n locale.toString(self.top(), \"f\", precision),\n )\n self.lineedit.setToolTip(tooltip)\n self.setToolTip(tooltip)\n\n def _on_text_changed(self, *args):\n self.valuesChanged.emit(self.values())\n\n def _on_validator_changed(self, *args):\n self._update_tooltip()\n self.setValues(self.values())\n\n def _get_double_validator(self):\n return self.lineedit.validator()\n\n def _get_lineedit(self):\n return self.lineedit\n\n def isValid(self):\n if not super().isValid():\n return False\n\n if not self.lineedit.isValid():\n return False\n\n try:\n parse_multifloat_text(self.lineedit.text())\n except:\n return False\n\n return True\n\n def values(self):\n try:\n return 
parse_multifloat_text(self.lineedit.text())\n except:\n return ()\n\n def setValues(self, values):\n locale = QtCore.QLocale.system()\n precision = self.decimals()\n\n text_values = []\n for value in values:\n if precision == 0:\n value = int(value)\n text_values.append(locale.toString(value))\n else:\n value = float(value)\n text_values.append(locale.toString(value, \"f\", precision))\n\n text = MULTIFLOAT_SEPARATOR.join(text_values)\n self.lineedit.setText(text)\n\n def setEnabled(self, enabled):\n super().setEnabled(enabled)\n self.lineedit.setEnabled(enabled)\n\n\ndef run(): # pragma: no cover\n import sys\n\n app = QtWidgets.QApplication(sys.argv)\n\n widget = ColoredMultiFloatLineEdit()\n widget.setRange(1.0, 5.0, 2)\n widget.setValues([3.0, 4.12345])\n\n mainwindow = QtWidgets.QMainWindow()\n mainwindow.setCentralWidget(widget)\n mainwindow.show()\n\n app.exec_()\n\n\nif __name__ == \"__main__\": # pragma: no cover\n run()\n"
] | [
[
"numpy.arange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
European-XFEL/EXtra-foam | [
"f8d225db6b8923d0cce9db2b8c8a80613600b64c"
] | [
"extra_foam/pipeline/tests/test_worker.py"
] | [
"import unittest\nfrom threading import Thread\nfrom unittest.mock import MagicMock, patch\nimport multiprocessing as mp\n\nfrom . import _TestDataMixin\nfrom extra_foam.pipeline.exceptions import ProcessingError, StopPipelineError\nfrom extra_foam.pipeline.f_worker import TrainWorker, PulseWorker\nfrom extra_foam.config import config, ExtensionType\nfrom extra_foam.pipeline.f_zmq import FoamZmqClient\n\nimport numpy as np\nfrom karabo_bridge import Client\n\n\[email protected](config._data, {\"DETECTOR\": \"LPD\"})\nclass TestWorker(_TestDataMixin, unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls._pause_ev = mp.Event()\n cls._close_ev = mp.Event()\n\n @patch('extra_foam.ipc.ProcessLogger.debug')\n @patch('extra_foam.ipc.ProcessLogger.error')\n def testRunTasks(self, error, debug):\n for kls in (TrainWorker, PulseWorker):\n worker = kls(self._pause_ev, self._close_ev)\n for proc in worker._tasks:\n proc.update = MagicMock()\n proc.process = MagicMock()\n worker._run_tasks({})\n\n proc = worker._tasks[0]\n\n # test responses to different Exceptions\n\n proc.process.side_effect = ValueError()\n worker._run_tasks({})\n debug.assert_called_once()\n self.assertIn(\"Unexpected Exception\", debug.call_args_list[0][0][0])\n debug.reset_mock()\n error.assert_called_once()\n error.reset_mock()\n\n proc.process.side_effect = ProcessingError()\n worker._run_tasks({})\n debug.assert_called_once()\n self.assertNotIn(\"Unexpected Exception\", debug.call_args_list[0][0][0])\n debug.reset_mock()\n error.assert_called_once()\n error.reset_mock()\n\n proc.process.side_effect = StopPipelineError()\n with self.assertRaises(StopPipelineError):\n worker._run_tasks({})\n debug.reset_mock()\n error.reset_mock()\n\n # Check that the extensions are enabled appropriately\n extensions_enabled = kls == TrainWorker\n self.assertEqual(worker._extension != None, extensions_enabled)\n self.assertEqual(worker._detector_extension != None, extensions_enabled)\n\n 
@patch('extra_foam.ipc.ProcessLogger.debug')\n def testExtensions(self, _):\n worker = TrainWorker(self._pause_ev, self._close_ev)\n\n # Disable processors\n worker._run_tasks = MagicMock()\n\n # Generate mock data\n mock_data = self.simple_data(1, (10, 10))[0]\n detector, key = mock_data[\"catalog\"].main_detector.split()\n\n # Mock the input and output pipes\n worker._input.start = MagicMock()\n worker._input.get = MagicMock(return_value=mock_data)\n worker._output = MagicMock()\n\n # Mock the database configuration for the extension ZmqOutQueue's\n extension_endpoint = \"ipc://foam-extension\"\n detector_extension_endpoint = \"ipc://bridge-extension\"\n worker._extension._meta.hget_all = MagicMock(return_value={ ExtensionType.ALL_OUTPUT.value: extension_endpoint })\n worker._detector_extension._meta.hget_all = MagicMock(return_value={ ExtensionType.DETECTOR_OUTPUT.value: detector_extension_endpoint })\n\n # Start worker\n self._pause_ev.set()\n worker_thread = Thread(target=worker.run)\n worker_thread.start()\n\n # Create clients\n bridge_client = Client(detector_extension_endpoint, timeout=1)\n foam_client = FoamZmqClient(extension_endpoint, timeout=1)\n\n # Test received detector data\n detector_data, _ = bridge_client.next()\n np.testing.assert_array_equal(detector_data[f\"EF_{detector}\"][key],\n mock_data[\"processed\"].image.masked_mean)\n\n # Test received special suite data\n foam_data = foam_client.next()\n for key in foam_data:\n if key != \"processed\":\n self.assertEqual(foam_data[key], mock_data[key])\n else:\n # Just comparing the detector image is enough for the\n # ProcessedData object.\n np.testing.assert_array_equal(foam_data[key].image.masked_mean,\n mock_data[key].image.masked_mean)\n\n # Close worker\n self._close_ev.set()\n worker_thread.join(timeout=1)\n"
] | [
[
"numpy.testing.assert_array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
borley1211/adaptune | [
"f1d389dd189cc31ad3ada8a17aee42a943075ebd"
] | [
"audapter/driver/filter.py"
] | [
"from typing import Optional\n\nimport numpy as np\nfrom nptyping import Array\nfrom pyroomacoustics.transform import STFT\n\nfrom ..domain.model import FilterModel\nfrom ..helper.config import load_settings\nfrom ..interface.driver.filter_driver import FilterDriverABC\n\nsettings = load_settings()\n\n\nclass FilterDriver(FilterDriverABC):\n def __init__(self, shape):\n self.shape = shape\n self.filter_ = FilterModel(\n settings.get(\"FILTER.model\"),\n self.shape,\n settings.get(\"FILTER.mu\"),\n settings.get(\"FILTER.w\"),\n settings.get(\"FILTER.lambda_\"),\n )\n\n def run(self, desired, data_in) -> Array:\n return self.filter_.update(desired, data_in)\n\n def get_filter_weights(self) -> Array:\n return self.filter_.w\n\n\ndef apply_filter(\n w: Array, x: Array, domain: str, stftobj: Optional[STFT] = None\n) -> Array:\n if domain == \"time\":\n return np.dot(w, x.T[::-1])\n elif domain == \"freq\":\n if not stftobj:\n raise ValueError(\"In FREQ domain, you HAVE TO SET 'stftobj'\")\n stftobj.analysis(x)\n X = stftobj.X[:]\n return stftobj.synthesis(np.diag(np.dot(w, X)))\n"
] | [
[
"numpy.dot"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
madaari/memcached | [
"cb79bc92089ba4f1c50cdf4e7aafddaeae2edf97"
] | [
"coyotest/NewTest4Core/Combined/OnlySlab/plot_script_portfolio.py"
] | [
"import pandas as pd\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom plotly.subplots import make_subplots\n\ndf1 = pd.read_csv('./WithoutNekara/memcached_coverage.txt')\n\ntrace1 = go.Scatter(\n x=df1['x'],\n y=df1['y'],\n name='Without Nekara'\n)\n\ndf2 = pd.read_csv('./WithNekara/memcached_coverage.txt')\n\ntrace2 = go.Scatter(\n x=df2['x'],\n y=df2['y'],\n name='With Nekara, Portfolio strategy'\n)\n\n\nfig = make_subplots(1,1);\nfig.update_layout(title_text=\"Memcached coverage. Hash of only Slab using the combined test case\");\n\nfig.add_trace(trace1);\nfig.add_trace(trace2);\n\nfig.show()\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
congve1/self-critical.pytorch | [
"d2434682f3eccc72517e0b6af326b8d53a79a898"
] | [
"train.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.tensorboard import SummaryWriter\n\nimport numpy as np\n\nimport time\nimport os\nfrom six.moves import cPickle\nimport traceback\nfrom collections import defaultdict\n\nimport opts\nimport models\nfrom dataloader import *\nimport skimage.io\nimport eval_utils\nimport misc.utils as utils\nfrom misc.rewards import init_scorer, get_self_critical_reward\nfrom misc.loss_wrapper import LossWrapper\n\n\ndef add_summary_value(writer, key, value, iteration):\n if writer:\n writer.add_scalar(key, value, iteration)\n\ndef train(opt):\n\n ################################\n # Build dataloader\n ################################\n loader = DataLoader(opt)\n opt.vocab_size = loader.vocab_size\n opt.seq_length = loader.seq_length\n\n ##########################\n # Initialize infos\n ##########################\n infos = {\n 'iter': 0,\n 'epoch': 0,\n 'loader_state_dict': None,\n 'vocab': loader.get_vocab(),\n }\n # Load old infos(if there is) and check if models are compatible\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl')):\n with open(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl'), 'rb') as f:\n infos = utils.pickle_load(f)\n saved_model_opt = infos['opt']\n need_be_same=[\"caption_model\", \"rnn_type\", \"rnn_size\", \"num_layers\"]\n for checkme in need_be_same:\n assert getattr(saved_model_opt, checkme) == getattr(opt, checkme), \"Command line argument and saved model disagree on '%s' \" % checkme\n infos['opt'] = opt\n\n #########################\n # Build logger\n #########################\n # naive dict logger\n histories = defaultdict(dict)\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl')):\n with open(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl'), 
'rb') as f:\n histories.update(utils.pickle_load(f))\n\n # tensorboard logger\n tb_summary_writer = SummaryWriter(opt.checkpoint_path)\n\n ##########################\n # Build model\n ##########################\n opt.vocab = loader.get_vocab()\n model = models.setup(opt).cuda()\n del opt.vocab\n # Load pretrained weights:\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'model.pth')):\n model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))\n \n # Wrap generation model with loss function(used for training)\n # This allows loss function computed separately on each machine\n lw_model = LossWrapper(model, opt)\n # Wrap with dataparallel\n dp_model = torch.nn.DataParallel(model)\n dp_lw_model = torch.nn.DataParallel(lw_model)\n\n ##########################\n # Build optimizer\n ##########################\n if opt.noamopt:\n assert opt.caption_model == 'transformer', 'noamopt can only work with transformer'\n optimizer = utils.get_std_opt(model, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)\n elif opt.reduce_on_plateau:\n optimizer = utils.build_optimizer(model.parameters(), opt)\n optimizer = utils.ReduceLROnPlateau(optimizer, factor=0.5, patience=3)\n else:\n optimizer = utils.build_optimizer(model.parameters(), opt)\n # Load the optimizer\n if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from,\"optimizer.pth\")):\n optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))\n\n #########################\n # Get ready to start\n #########################\n iteration = infos['iter']\n epoch = infos['epoch']\n # For back compatibility\n if 'iterators' in infos:\n infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}\n loader.load_state_dict(infos['loader_state_dict'])\n if opt.load_best_score == 1:\n best_val_score = infos.get('best_val_score', None)\n if 
opt.noamopt:\n optimizer._step = iteration\n # flag indicating finish of an epoch\n # Always set to True at the beginning to initialize the lr or etc.\n epoch_done = True\n # Assure in training mode\n dp_lw_model.train()\n\n # Start training\n try:\n while True:\n # Stop if reaching max epochs\n if epoch >= opt.max_epochs and opt.max_epochs != -1:\n break\n\n if epoch_done:\n if not opt.noamopt and not opt.reduce_on_plateau:\n # Assign the learning rate\n if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:\n frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every\n decay_factor = opt.learning_rate_decay_rate ** frac\n opt.current_lr = opt.learning_rate * decay_factor\n else:\n opt.current_lr = opt.learning_rate\n utils.set_lr(optimizer, opt.current_lr) # set the decayed rate\n # Assign the scheduled sampling prob\n if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:\n frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every\n opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)\n model.ss_prob = opt.ss_prob\n\n # If start self critical training\n if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:\n sc_flag = True\n init_scorer(opt.cached_tokens)\n else:\n sc_flag = False\n \n # If start structure loss training\n if opt.structure_after != -1 and epoch >= opt.structure_after:\n struc_flag = True\n init_scorer(opt.cached_tokens)\n else:\n struc_flag = False\n\n epoch_done = False\n \n start = time.time()\n # Load data from train split (0)\n data = loader.get_batch('train')\n print('Read data:', time.time() - start)\n\n torch.cuda.synchronize()\n start = time.time()\n\n tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]\n tmp = [_ if _ is None else _.cuda() for _ in tmp]\n fc_feats, att_feats, labels, masks, att_masks = tmp\n \n optimizer.zero_grad()\n 
model_out = dp_lw_model(fc_feats, att_feats, labels, masks, att_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag)\n\n loss = model_out['loss'].mean()\n\n loss.backward()\n getattr(torch.nn.utils, 'clip_grad_%s_' %(opt.grad_clip_mode))(model.parameters(), opt.grad_clip_value)\n optimizer.step()\n train_loss = loss.item()\n torch.cuda.synchronize()\n end = time.time()\n if struc_flag:\n print(\"iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), end - start))\n elif not sc_flag:\n print(\"iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, train_loss, end - start))\n else:\n print(\"iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}\" \\\n .format(iteration, epoch, model_out['reward'].mean(), end - start))\n\n # Update the iteration and epoch\n iteration += 1\n if data['bounds']['wrapped']:\n epoch += 1\n epoch_done = True\n\n # Write the training loss summary\n if (iteration % opt.losses_log_every == 0):\n tb_summary_writer.add_scalar('train_loss', train_loss, iteration)\n if opt.noamopt:\n opt.current_lr = optimizer.rate()\n elif opt.reduce_on_plateau:\n opt.current_lr = optimizer.current_lr\n tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)\n tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)\n if sc_flag:\n tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)\n elif struc_flag:\n tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)\n tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)\n tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)\n\n histories['loss_history'][iteration] = train_loss if not sc_flag else 
model_out['reward'].mean()\n histories['lr_history'][iteration] = opt.current_lr\n histories['ss_prob_history'][iteration] = model.ss_prob\n\n # update infos\n infos['iter'] = iteration\n infos['epoch'] = epoch\n infos['loader_state_dict'] = loader.state_dict()\n \n # make evaluation on validation set, and save model\n if (iteration % opt.save_checkpoint_every == 0 and not opt.save_every_epoch) or \\\n (epoch_done and opt.save_every_epoch):\n # eval model\n eval_kwargs = {'split': 'val',\n 'dataset': opt.input_json}\n eval_kwargs.update(vars(opt))\n val_loss, predictions, lang_stats = eval_utils.eval_split(\n dp_model, lw_model.crit, loader, eval_kwargs)\n\n if opt.reduce_on_plateau:\n if 'CIDEr' in lang_stats:\n optimizer.scheduler_step(-lang_stats['CIDEr'])\n else:\n optimizer.scheduler_step(val_loss)\n # Write validation result into summary\n tb_summary_writer.add_scalar('validation loss', val_loss, iteration)\n if lang_stats is not None:\n for k,v in lang_stats.items():\n tb_summary_writer.add_scalar(k, v, iteration)\n histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}\n\n # Save model if is improving on validation result\n if opt.language_eval == 1:\n current_score = lang_stats['CIDEr']\n else:\n current_score = - val_loss\n\n best_flag = False\n\n if best_val_score is None or current_score > best_val_score:\n best_val_score = current_score\n best_flag = True\n\n # Dump miscalleous informations\n infos['best_val_score'] = best_val_score\n\n utils.save_checkpoint(opt, model, infos, optimizer, histories)\n if opt.save_history_ckpt:\n utils.save_checkpoint(opt, model, infos, optimizer,\n append=str(epoch) if opt.save_every_epoch else str(iteration))\n\n if best_flag:\n utils.save_checkpoint(opt, model, infos, optimizer, append='best')\n\n except (RuntimeError, KeyboardInterrupt):\n print('Save ckpt on exception ...')\n utils.save_checkpoint(opt, model, infos, optimizer)\n print('Save ckpt 
done.')\n stack_trace = traceback.format_exc()\n print(stack_trace)\n\n\nopt = opts.parse_opt()\ntrain(opt)\n"
] | [
[
"torch.nn.DataParallel",
"torch.cuda.synchronize",
"torch.utils.tensorboard.SummaryWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cyac15/visualization_COVID19_module | [
"ad34a87085b449bd3cf4b1463693cdcbe89fdd11"
] | [
"CovidVoting/add_data.py"
] | [
"\"\"\"This module creates a base data set that add_data() build on.\nadd_data_csv(base_data, new_data, base_state_col, new_state_col,\nuse_state, how_join)\nThe purpose of this module is to prepare data for visualization\n\"\"\"\n# Import Packages\nimport pandas as pd\nimport geopandas as gpd\n\n\ndef add_data_csv(base_data, new_data, base_state_col, new_state_col,\n use_state, how_join):\n \"\"\"\n Args:\n base_data (str):filename with path of starting dataframe that\n new_data (str): filename with path to csv of new data to be added\n base_state_col(str): name of state column of base data\n new_state_col(str): name of state column of new data\n use_state (list): list of strings of the desired states\n how_join (string): (‘left’, ‘right’, ‘outer’, ‘inner’)\n\n Returns:\n merged_df: the dataframe with the newly added data\n \"\"\"\n # Read the dataframe to add too\n base_df = pd.read_csv(base_data)\n # Read data being added as a dataframe\n new_df = pd.read_csv(new_data)\n\n # Drop nondesired states\n base_df = base_df.loc[base_df[base_state_col].isin(use_state)]\n new_df = new_df.loc[new_df[new_state_col].isin(use_state)]\n\n # Merge datasets\n merged_df = pd.merge(left=base_df, right=new_df, how=how_join,\n left_on=[base_state_col], right_on=[new_state_col])\n return merged_df\n\n"
] | [
[
"pandas.merge",
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
arthurmensch/scikit-learn | [
"d4c9e849ba1ab27930190d93fcfc33cc4b82470d"
] | [
"sklearn/preprocessing/_encoders.py"
] | [
"# Authors: Andreas Mueller <[email protected]>\n# Joris Van den Bossche <[email protected]>\n# License: BSD 3 clause\n\nfrom __future__ import division\n\nimport numbers\nimport warnings\n\nimport numpy as np\nfrom scipy import sparse\n\nfrom .. import get_config as _get_config\nfrom ..base import BaseEstimator, TransformerMixin\nfrom ..externals import six\nfrom ..utils import check_array\nfrom ..utils import deprecated\nfrom ..utils.fixes import _argmax, _object_dtype_isnan\nfrom ..utils.validation import check_is_fitted\n\nfrom .base import _transform_selected\nfrom .label import _encode, _encode_check_unknown\n\n\nrange = six.moves.range\n\n\n__all__ = [\n 'OneHotEncoder',\n 'OrdinalEncoder'\n]\n\n\nclass _BaseEncoder(BaseEstimator, TransformerMixin):\n \"\"\"\n Base class for encoders that includes the code to categorize and\n transform the input features.\n\n \"\"\"\n\n def _check_X(self, X):\n \"\"\"\n Perform custom check_array:\n - convert list of strings to object dtype\n - check for missing values for object dtype data (check_array does\n not do that)\n\n \"\"\"\n X_temp = check_array(X, dtype=None)\n if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):\n X = check_array(X, dtype=np.object)\n else:\n X = X_temp\n\n if X.dtype == np.dtype('object'):\n if not _get_config()['assume_finite']:\n if _object_dtype_isnan(X).any():\n raise ValueError(\"Input contains NaN\")\n\n return X\n\n def _fit(self, X, handle_unknown='error'):\n X = self._check_X(X)\n\n n_samples, n_features = X.shape\n\n if self._categories != 'auto':\n if X.dtype != object:\n for cats in self._categories:\n if not np.all(np.sort(cats) == np.array(cats)):\n raise ValueError(\"Unsorted categories are not \"\n \"supported for numerical categories\")\n if len(self._categories) != n_features:\n raise ValueError(\"Shape mismatch: if n_values is an array,\"\n \" it has to be of shape (n_features,).\")\n\n self.categories_ = []\n\n for i in range(n_features):\n Xi = X[:, i]\n if 
self._categories == 'auto':\n cats = _encode(Xi)\n else:\n cats = np.array(self._categories[i], dtype=X.dtype)\n if handle_unknown == 'error':\n diff = _encode_check_unknown(Xi, cats)\n if diff:\n msg = (\"Found unknown categories {0} in column {1}\"\n \" during fit\".format(diff, i))\n raise ValueError(msg)\n self.categories_.append(cats)\n\n def _transform(self, X, handle_unknown='error'):\n X = self._check_X(X)\n\n _, n_features = X.shape\n X_int = np.zeros_like(X, dtype=np.int)\n X_mask = np.ones_like(X, dtype=np.bool)\n\n for i in range(n_features):\n Xi = X[:, i]\n diff, valid_mask = _encode_check_unknown(Xi, self.categories_[i],\n return_mask=True)\n\n if not np.all(valid_mask):\n if handle_unknown == 'error':\n msg = (\"Found unknown categories {0} in column {1}\"\n \" during transform\".format(diff, i))\n raise ValueError(msg)\n else:\n # Set the problematic rows to an acceptable value and\n # continue `The rows are marked `X_mask` and will be\n # removed later.\n X_mask[:, i] = valid_mask\n Xi = Xi.copy()\n Xi[~valid_mask] = self.categories_[i][0]\n _, encoded = _encode(Xi, self.categories_[i], encode=True)\n X_int[:, i] = encoded\n\n return X_int, X_mask\n\n\nclass OneHotEncoder(_BaseEncoder):\n \"\"\"Encode categorical integer features as a one-hot numeric array.\n\n The input to this transformer should be an array-like of integers or\n strings, denoting the values taken on by categorical (discrete) features.\n The features are encoded using a one-hot (aka 'one-of-K' or 'dummy')\n encoding scheme. This creates a binary column for each category and\n returns a sparse matrix or dense array.\n\n By default, the encoder derives the categories based on the unique values\n in each feature. Alternatively, you can also specify the `categories`\n manually.\n The OneHotEncoder previously assumed that the input features take on\n values in the range [0, max(values)). 
This behaviour is deprecated.\n\n This encoding is needed for feeding categorical data to many scikit-learn\n estimators, notably linear models and SVMs with the standard kernels.\n\n Note: a one-hot encoding of y labels should use a LabelBinarizer\n instead.\n\n Read more in the :ref:`User Guide <preprocessing_categorical_features>`.\n\n Parameters\n ----------\n categories : 'auto' or a list of lists/arrays of values, default='auto'.\n Categories (unique values) per feature:\n\n - 'auto' : Determine categories automatically from the training data.\n - list : ``categories[i]`` holds the categories expected in the ith\n column. The passed categories should not mix strings and numeric\n values within a single feature, and should be sorted in case of\n numeric values.\n\n The used categories can be found in the ``categories_`` attribute.\n\n sparse : boolean, default=True\n Will return sparse matrix if set True else will return an array.\n\n dtype : number type, default=np.float\n Desired dtype of output.\n\n handle_unknown : 'error' or 'ignore', default='error'.\n Whether to raise an error or ignore if an unknown categorical feature\n is present during transform (default is to raise). When this parameter\n is set to 'ignore' and an unknown category is encountered during\n transform, the resulting one-hot encoded columns for this feature\n will be all zeros. In the inverse transform, an unknown category\n will be denoted as None.\n\n n_values : 'auto', int or array of ints, default='auto'\n Number of values per feature.\n\n - 'auto' : determine value range from training data.\n - int : number of categorical values per feature.\n Each feature value should be in ``range(n_values)``\n - array : ``n_values[i]`` is the number of categorical values in\n ``X[:, i]``. Each feature value should be\n in ``range(n_values[i])``\n\n .. deprecated:: 0.20\n The `n_values` keyword was deprecated in version 0.20 and will\n be removed in 0.22. 
Use `categories` instead.\n\n categorical_features : 'all' or array of indices or mask, default='all'\n Specify what features are treated as categorical.\n\n - 'all': All features are treated as categorical.\n - array of indices: Array of categorical feature indices.\n - mask: Array of length n_features and with dtype=bool.\n\n Non-categorical features are always stacked to the right of the matrix.\n\n .. deprecated:: 0.20\n The `categorical_features` keyword was deprecated in version\n 0.20 and will be removed in 0.22.\n You can use the ``ColumnTransformer`` instead.\n\n Attributes\n ----------\n categories_ : list of arrays\n The categories of each feature determined during fitting\n (in order of the features in X and corresponding with the output\n of ``transform``).\n\n active_features_ : array\n Indices for active features, meaning values that actually occur\n in the training set. Only available when n_values is ``'auto'``.\n\n .. deprecated:: 0.20\n The ``active_features_`` attribute was deprecated in version\n 0.20 and will be removed in 0.22.\n\n feature_indices_ : array of shape (n_features,)\n Indices to feature ranges.\n Feature ``i`` in the original data is mapped to features\n from ``feature_indices_[i]`` to ``feature_indices_[i+1]``\n (and then potentially masked by ``active_features_`` afterwards)\n\n .. deprecated:: 0.20\n The ``feature_indices_`` attribute was deprecated in version\n 0.20 and will be removed in 0.22.\n\n n_values_ : array of shape (n_features,)\n Maximum number of values per feature.\n\n .. 
deprecated:: 0.20\n The ``n_values_`` attribute was deprecated in version\n 0.20 and will be removed in 0.22.\n\n Examples\n --------\n Given a dataset with two features, we let the encoder find the unique\n values per feature and transform the data to a binary one-hot encoding.\n\n >>> from sklearn.preprocessing import OneHotEncoder\n >>> enc = OneHotEncoder(handle_unknown='ignore')\n >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]\n >>> enc.fit(X)\n ... # doctest: +ELLIPSIS\n OneHotEncoder(categorical_features=None, categories=None,\n dtype=<... 'numpy.float64'>, handle_unknown='ignore',\n n_values=None, sparse=True)\n\n >>> enc.categories_\n [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]\n >>> enc.transform([['Female', 1], ['Male', 4]]).toarray()\n array([[1., 0., 1., 0., 0.],\n [0., 1., 0., 0., 0.]])\n >>> enc.inverse_transform([[0, 1, 1, 0, 0], [0, 0, 0, 1, 0]])\n array([['Male', 1],\n [None, 2]], dtype=object)\n >>> enc.get_feature_names()\n array(['x0_Female', 'x0_Male', 'x1_1', 'x1_2', 'x1_3'], dtype=object)\n\n See also\n --------\n sklearn.preprocessing.OrdinalEncoder : performs an ordinal (integer)\n encoding of the categorical features.\n sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of\n dictionary items (also handles string-valued features).\n sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot\n encoding of dictionary items or strings.\n sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all\n fashion.\n sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of\n iterables and a multilabel format, e.g. 
a (samples x classes) binary\n matrix indicating the presence of a class label.\n \"\"\"\n\n def __init__(self, n_values=None, categorical_features=None,\n categories=None, sparse=True, dtype=np.float64,\n handle_unknown='error'):\n self.categories = categories\n self.sparse = sparse\n self.dtype = dtype\n self.handle_unknown = handle_unknown\n self.n_values = n_values\n self.categorical_features = categorical_features\n\n # Deprecated attributes\n\n @property\n @deprecated(\"The ``active_features_`` attribute was deprecated in version \"\n \"0.20 and will be removed 0.22.\")\n def active_features_(self):\n check_is_fitted(self, 'categories_')\n return self._active_features_\n\n @property\n @deprecated(\"The ``feature_indices_`` attribute was deprecated in version \"\n \"0.20 and will be removed 0.22.\")\n def feature_indices_(self):\n check_is_fitted(self, 'categories_')\n return self._feature_indices_\n\n @property\n @deprecated(\"The ``n_values_`` attribute was deprecated in version \"\n \"0.20 and will be removed 0.22.\")\n def n_values_(self):\n check_is_fitted(self, 'categories_')\n return self._n_values_\n\n def _handle_deprecations(self, X):\n\n # internal version of the attributes to handle deprecations\n self._categories = getattr(self, '_categories', None)\n self._categorical_features = getattr(self, '_categorical_features',\n None)\n\n # user manually set the categories or second fit -> never legacy mode\n if self.categories is not None or self._categories is not None:\n self._legacy_mode = False\n if self.categories is not None:\n self._categories = self.categories\n\n # categories not set -> infer if we need legacy mode or not\n elif self.n_values is not None and self.n_values != 'auto':\n msg = (\n \"Passing 'n_values' is deprecated in version 0.20 and will be \"\n \"removed in 0.22. You can use the 'categories' keyword \"\n \"instead. 
'n_values=n' corresponds to 'categories=[range(n)]'.\"\n )\n warnings.warn(msg, DeprecationWarning)\n self._legacy_mode = True\n\n else: # n_values = 'auto'\n if self.handle_unknown == 'ignore':\n # no change in behaviour, no need to raise deprecation warning\n self._legacy_mode = False\n self._categories = 'auto'\n if self.n_values == 'auto':\n # user manually specified this\n msg = (\n \"Passing 'n_values' is deprecated in version 0.20 and \"\n \"will be removed in 0.22. n_values='auto' can be \"\n \"replaced with categories='auto'.\"\n )\n warnings.warn(msg, DeprecationWarning)\n else:\n\n # check if we have integer or categorical input\n try:\n X = check_array(X, dtype=np.int)\n except ValueError:\n self._legacy_mode = False\n self._categories = 'auto'\n else:\n msg = (\n \"The handling of integer data will change in version \"\n \"0.22. Currently, the categories are determined \"\n \"based on the range [0, max(values)], while in the \"\n \"future they will be determined based on the unique \"\n \"values.\\nIf you want the future behaviour and \"\n \"silence this warning, you can specify \"\n \"\\\"categories='auto'\\\".\\n\"\n \"In case you used a LabelEncoder before this \"\n \"OneHotEncoder to convert the categories to integers, \"\n \"then you can now use the OneHotEncoder directly.\"\n )\n warnings.warn(msg, FutureWarning)\n self._legacy_mode = True\n self.n_values = 'auto'\n\n # if user specified categorical_features -> always use legacy mode\n if self.categorical_features is not None:\n if (isinstance(self.categorical_features, six.string_types)\n and self.categorical_features == 'all'):\n warnings.warn(\n \"The 'categorical_features' keyword is deprecated in \"\n \"version 0.20 and will be removed in 0.22. 
The passed \"\n \"value of 'all' is the default and can simply be removed.\",\n DeprecationWarning)\n else:\n if self.categories is not None:\n raise ValueError(\n \"The 'categorical_features' keyword is deprecated, \"\n \"and cannot be used together with specifying \"\n \"'categories'.\")\n warnings.warn(\n \"The 'categorical_features' keyword is deprecated in \"\n \"version 0.20 and will be removed in 0.22. You can \"\n \"use the ColumnTransformer instead.\", DeprecationWarning)\n self._legacy_mode = True\n self._categorical_features = self.categorical_features\n else:\n self._categorical_features = 'all'\n\n def fit(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data to determine the categories of each feature.\n\n Returns\n -------\n self\n \"\"\"\n if self.handle_unknown not in ('error', 'ignore'):\n msg = (\"handle_unknown should be either 'error' or 'ignore', \"\n \"got {0}.\".format(self.handle_unknown))\n raise ValueError(msg)\n\n self._handle_deprecations(X)\n\n if self._legacy_mode:\n _transform_selected(X, self._legacy_fit_transform, self.dtype,\n self._categorical_features,\n copy=True)\n return self\n else:\n self._fit(X, handle_unknown=self.handle_unknown)\n return self\n\n def _legacy_fit_transform(self, X):\n \"\"\"Assumes X contains only categorical features.\"\"\"\n dtype = getattr(X, 'dtype', None)\n X = check_array(X, dtype=np.int)\n if np.any(X < 0):\n raise ValueError(\"OneHotEncoder in legacy mode cannot handle \"\n \"categories encoded as negative integers. 
\"\n \"Please set categories='auto' explicitly to \"\n \"be able to use arbitrary integer values as \"\n \"category identifiers.\")\n n_samples, n_features = X.shape\n if (isinstance(self.n_values, six.string_types) and\n self.n_values == 'auto'):\n n_values = np.max(X, axis=0) + 1\n elif isinstance(self.n_values, numbers.Integral):\n if (np.max(X, axis=0) >= self.n_values).any():\n raise ValueError(\"Feature out of bounds for n_values=%d\"\n % self.n_values)\n n_values = np.empty(n_features, dtype=np.int)\n n_values.fill(self.n_values)\n else:\n try:\n n_values = np.asarray(self.n_values, dtype=int)\n except (ValueError, TypeError):\n raise TypeError(\"Wrong type for parameter `n_values`. Expected\"\n \" 'auto', int or array of ints, got %r\"\n % type(X))\n if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:\n raise ValueError(\"Shape mismatch: if n_values is an array,\"\n \" it has to be of shape (n_features,).\")\n\n self._n_values_ = n_values\n self.categories_ = [np.arange(n_val - 1, dtype=dtype)\n for n_val in n_values]\n n_values = np.hstack([[0], n_values])\n indices = np.cumsum(n_values)\n self._feature_indices_ = indices\n\n column_indices = (X + indices[:-1]).ravel()\n row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)\n data = np.ones(n_samples * n_features)\n out = sparse.coo_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n\n if (isinstance(self.n_values, six.string_types) and\n self.n_values == 'auto'):\n mask = np.array(out.sum(axis=0)).ravel() != 0\n active_features = np.where(mask)[0]\n out = out[:, active_features]\n self._active_features_ = active_features\n\n self.categories_ = [\n np.unique(X[:, i]).astype(dtype) if dtype\n else np.unique(X[:, i]) for i in range(n_features)]\n\n return out if self.sparse else out.toarray()\n\n def fit_transform(self, X, y=None):\n \"\"\"Fit OneHotEncoder to X, then transform X.\n\n Equivalent to fit(X).transform(X) but 
more convenient.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data to encode.\n\n Returns\n -------\n X_out : sparse matrix if sparse=True else a 2-d array\n Transformed input.\n \"\"\"\n if self.handle_unknown not in ('error', 'ignore'):\n msg = (\"handle_unknown should be either 'error' or 'ignore', \"\n \"got {0}.\".format(self.handle_unknown))\n raise ValueError(msg)\n\n self._handle_deprecations(X)\n\n if self._legacy_mode:\n return _transform_selected(\n X, self._legacy_fit_transform, self.dtype,\n self._categorical_features, copy=True)\n else:\n return self.fit(X).transform(X)\n\n def _legacy_transform(self, X):\n \"\"\"Assumes X contains only categorical features.\"\"\"\n X = check_array(X, dtype=np.int)\n if np.any(X < 0):\n raise ValueError(\"OneHotEncoder in legacy mode cannot handle \"\n \"categories encoded as negative integers. \"\n \"Please set categories='auto' explicitly to \"\n \"be able to use arbitrary integer values as \"\n \"category identifiers.\")\n n_samples, n_features = X.shape\n\n indices = self._feature_indices_\n if n_features != indices.shape[0] - 1:\n raise ValueError(\"X has different shape than during fitting.\"\n \" Expected %d, got %d.\"\n % (indices.shape[0] - 1, n_features))\n\n # We use only those categorical features of X that are known using fit.\n # i.e lesser than n_values_ using mask.\n # This means, if self.handle_unknown is \"ignore\", the row_indices and\n # col_indices corresponding to the unknown categorical feature are\n # ignored.\n mask = (X < self._n_values_).ravel()\n if np.any(~mask):\n if self.handle_unknown not in ['error', 'ignore']:\n raise ValueError(\"handle_unknown should be either error or \"\n \"unknown got %s\" % self.handle_unknown)\n if self.handle_unknown == 'error':\n raise ValueError(\"unknown categorical feature present %s \"\n \"during transform.\" % X.ravel()[~mask])\n\n column_indices = (X + indices[:-1]).ravel()[mask]\n row_indices = 
np.repeat(np.arange(n_samples, dtype=np.int32),\n n_features)[mask]\n data = np.ones(np.sum(mask))\n out = sparse.coo_matrix((data, (row_indices, column_indices)),\n shape=(n_samples, indices[-1]),\n dtype=self.dtype).tocsr()\n if (isinstance(self.n_values, six.string_types) and\n self.n_values == 'auto'):\n out = out[:, self._active_features_]\n\n return out if self.sparse else out.toarray()\n\n def _transform_new(self, X):\n \"\"\"New implementation assuming categorical input\"\"\"\n X_temp = check_array(X, dtype=None)\n if not hasattr(X, 'dtype') and np.issubdtype(X_temp.dtype, np.str_):\n X = check_array(X, dtype=np.object)\n else:\n X = X_temp\n\n n_samples, n_features = X.shape\n\n X_int, X_mask = self._transform(X, handle_unknown=self.handle_unknown)\n\n mask = X_mask.ravel()\n n_values = [cats.shape[0] for cats in self.categories_]\n n_values = np.array([0] + n_values)\n feature_indices = np.cumsum(n_values)\n\n indices = (X_int + feature_indices[:-1]).ravel()[mask]\n indptr = X_mask.sum(axis=1).cumsum()\n indptr = np.insert(indptr, 0, 0)\n data = np.ones(n_samples * n_features)[mask]\n\n out = sparse.csr_matrix((data, indices, indptr),\n shape=(n_samples, feature_indices[-1]),\n dtype=self.dtype)\n if not self.sparse:\n return out.toarray()\n else:\n return out\n\n def transform(self, X):\n \"\"\"Transform X using one-hot encoding.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data to encode.\n\n Returns\n -------\n X_out : sparse matrix if sparse=True else a 2-d array\n Transformed input.\n \"\"\"\n if self._legacy_mode:\n return _transform_selected(X, self._legacy_transform, self.dtype,\n self._categorical_features,\n copy=True)\n else:\n return self._transform_new(X)\n\n def inverse_transform(self, X):\n \"\"\"Convert the back data to the original representation.\n\n In case unknown categories are encountered (all zero's in the\n one-hot encoding), ``None`` is used to represent this category.\n\n Parameters\n 
----------\n X : array-like or sparse matrix, shape [n_samples, n_encoded_features]\n The transformed data.\n\n Returns\n -------\n X_tr : array-like, shape [n_samples, n_features]\n Inverse transformed array.\n\n \"\"\"\n # if self._legacy_mode:\n # raise ValueError(\"only supported for categorical features\")\n\n check_is_fitted(self, 'categories_')\n X = check_array(X, accept_sparse='csr')\n\n n_samples, _ = X.shape\n n_features = len(self.categories_)\n n_transformed_features = sum([len(cats) for cats in self.categories_])\n\n # validate shape of passed X\n msg = (\"Shape of the passed X data is not correct. Expected {0} \"\n \"columns, got {1}.\")\n if X.shape[1] != n_transformed_features:\n raise ValueError(msg.format(n_transformed_features, X.shape[1]))\n\n # create resulting array of appropriate dtype\n dt = np.find_common_type([cat.dtype for cat in self.categories_], [])\n X_tr = np.empty((n_samples, n_features), dtype=dt)\n\n j = 0\n found_unknown = {}\n\n for i in range(n_features):\n n_categories = len(self.categories_[i])\n sub = X[:, j:j + n_categories]\n\n # for sparse X argmax returns 2D matrix, ensure 1D array\n labels = np.asarray(_argmax(sub, axis=1)).flatten()\n X_tr[:, i] = self.categories_[i][labels]\n\n if self.handle_unknown == 'ignore':\n # ignored unknown categories: we have a row of all zero's\n unknown = np.asarray(sub.sum(axis=1) == 0).flatten()\n if unknown.any():\n found_unknown[i] = unknown\n\n j += n_categories\n\n # if ignored are found: potentially need to upcast result to\n # insert None values\n if found_unknown:\n if X_tr.dtype != object:\n X_tr = X_tr.astype(object)\n\n for idx, mask in found_unknown.items():\n X_tr[mask, idx] = None\n\n return X_tr\n\n def get_feature_names(self, input_features=None):\n \"\"\"Return feature names for output features.\n\n Parameters\n ----------\n input_features : list of string, length n_features, optional\n String names for input features if available. By default,\n \"x0\", \"x1\", ... 
\"xn_features\" is used.\n\n Returns\n -------\n output_feature_names : array of string, length n_output_features\n\n \"\"\"\n check_is_fitted(self, 'categories_')\n cats = self.categories_\n if input_features is None:\n input_features = ['x%d' % i for i in range(len(cats))]\n elif(len(input_features) != len(self.categories_)):\n raise ValueError(\n \"input_features should have length equal to number of \"\n \"features ({}), got {}\".format(len(self.categories_),\n len(input_features)))\n\n feature_names = []\n for i in range(len(cats)):\n names = [\n input_features[i] + '_' + six.text_type(t) for t in cats[i]]\n feature_names.extend(names)\n\n return np.array(feature_names, dtype=object)\n\n\nclass OrdinalEncoder(_BaseEncoder):\n \"\"\"Encode categorical features as an integer array.\n\n The input to this transformer should be an array-like of integers or\n strings, denoting the values taken on by categorical (discrete) features.\n The features are converted to ordinal integers. This results in\n a single column of integers (0 to n_categories - 1) per feature.\n\n Read more in the :ref:`User Guide <preprocessing_categorical_features>`.\n\n Parameters\n ----------\n categories : 'auto' or a list of lists/arrays of values.\n Categories (unique values) per feature:\n\n - 'auto' : Determine categories automatically from the training data.\n - list : ``categories[i]`` holds the categories expected in the ith\n column. 
The passed categories should not mix strings and numeric\n values, and should be sorted in case of numeric values.\n\n The used categories can be found in the ``categories_`` attribute.\n\n dtype : number type, default np.float64\n Desired dtype of output.\n\n Attributes\n ----------\n categories_ : list of arrays\n The categories of each feature determined during fitting\n (in order of the features in X and corresponding with the output\n of ``transform``).\n\n Examples\n --------\n Given a dataset with two features, we let the encoder find the unique\n values per feature and transform the data to an ordinal encoding.\n\n >>> from sklearn.preprocessing import OrdinalEncoder\n >>> enc = OrdinalEncoder()\n >>> X = [['Male', 1], ['Female', 3], ['Female', 2]]\n >>> enc.fit(X)\n ... # doctest: +ELLIPSIS\n OrdinalEncoder(categories='auto', dtype=<... 'numpy.float64'>)\n >>> enc.categories_\n [array(['Female', 'Male'], dtype=object), array([1, 2, 3], dtype=object)]\n >>> enc.transform([['Female', 3], ['Male', 1]])\n array([[0., 2.],\n [1., 0.]])\n\n >>> enc.inverse_transform([[1, 0], [0, 1]])\n array([['Male', 1],\n ['Female', 2]], dtype=object)\n\n See also\n --------\n sklearn.preprocessing.OneHotEncoder : performs a one-hot encoding of\n categorical features.\n sklearn.preprocessing.LabelEncoder : encodes target labels with values\n between 0 and n_classes-1.\n \"\"\"\n\n def __init__(self, categories='auto', dtype=np.float64):\n self.categories = categories\n self.dtype = dtype\n\n def fit(self, X, y=None):\n \"\"\"Fit the OrdinalEncoder to X.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data to determine the categories of each feature.\n\n Returns\n -------\n self\n\n \"\"\"\n # base classes uses _categories to deal with deprecations in\n # OneHoteEncoder: can be removed once deprecations are removed\n self._categories = self.categories\n self._fit(X)\n\n return self\n\n def transform(self, X):\n \"\"\"Transform X to ordinal 
codes.\n\n Parameters\n ----------\n X : array-like, shape [n_samples, n_features]\n The data to encode.\n\n Returns\n -------\n X_out : sparse matrix or a 2-d array\n Transformed input.\n\n \"\"\"\n X_int, _ = self._transform(X)\n return X_int.astype(self.dtype, copy=False)\n\n def inverse_transform(self, X):\n \"\"\"Convert the data back to the original representation.\n\n Parameters\n ----------\n X : array-like or sparse matrix, shape [n_samples, n_encoded_features]\n The transformed data.\n\n Returns\n -------\n X_tr : array-like, shape [n_samples, n_features]\n Inverse transformed array.\n\n \"\"\"\n check_is_fitted(self, 'categories_')\n X = check_array(X, accept_sparse='csr')\n\n n_samples, _ = X.shape\n n_features = len(self.categories_)\n\n # validate shape of passed X\n msg = (\"Shape of the passed X data is not correct. Expected {0} \"\n \"columns, got {1}.\")\n if X.shape[1] != n_features:\n raise ValueError(msg.format(n_features, X.shape[1]))\n\n # create resulting array of appropriate dtype\n dt = np.find_common_type([cat.dtype for cat in self.categories_], [])\n X_tr = np.empty((n_samples, n_features), dtype=dt)\n\n for i in range(n_features):\n labels = X[:, i].astype('int64')\n X_tr[:, i] = self.categories_[i][labels]\n\n return X_tr\n"
] | [
[
"numpy.asarray",
"numpy.issubdtype",
"numpy.cumsum",
"numpy.dtype",
"numpy.all",
"numpy.max",
"numpy.zeros_like",
"numpy.any",
"numpy.where",
"numpy.hstack",
"scipy.sparse.coo_matrix",
"numpy.ones_like",
"numpy.unique",
"numpy.arange",
"numpy.insert",
"scipy.sparse.csr_matrix",
"numpy.find_common_type",
"numpy.array",
"numpy.sum",
"numpy.sort",
"numpy.ones",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
saulmoore1/tierpsy-tracker | [
"69630c90de2e8a0b70168790f9c1198a0a644b3c"
] | [
"tierpsy/analysis/split_fov/helper.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 7 17:50:47 2019\n\n@author: lferiani\n\"\"\"\n\n#%% import statements\nimport numpy as np\nimport pandas as pd\nfrom numpy.fft import fft2, ifft2, fftshift\n\n#%% constants\n\nWELLS_ATTRIBUTES = ['x','y','r','row','col',\n 'x_min','x_max','y_min','y_max',\n 'well_name', 'is_good_well']\n\n# dictionary to go from camera name to channel\n# to be updated as we get more copies of the LoopBio rig\nCAM2CH_DICT_legacy = {\"22594549\":'Ch1',\n \"22594548\":'Ch2',\n \"22594546\":'Ch3',\n \"22436248\":'Ch4',\n \"22594559\":'Ch5',\n \"22594547\":'Ch6'}\n\nCAM2CH_DICT = {\"22956818\":'Ch1', # Hydra01\n \"22956816\":'Ch2',\n \"22956813\":'Ch3',\n \"22956805\":'Ch4',\n \"22956807\":'Ch5',\n \"22956832\":'Ch6',\n \"22956839\":'Ch1', # Hydra02\n \"22956837\":'Ch2',\n \"22956836\":'Ch3',\n \"22956829\":'Ch4',\n \"22956822\":'Ch5',\n \"22956806\":'Ch6',\n \"22956814\":'Ch1', # Hydra03\n \"22956833\":'Ch2',\n \"22956819\":'Ch3',\n \"22956827\":'Ch4',\n \"22956823\":'Ch5',\n \"22956840\":'Ch6',\n \"22956812\":'Ch1', # Hydra04\n \"22956834\":'Ch2',\n \"22956817\":'Ch3',\n \"22956811\":'Ch4',\n \"22956831\":'Ch5',\n \"22956809\":'Ch6',\n \"22594559\":'Ch1', # Hydra05\n \"22594547\":'Ch2',\n \"22594546\":'Ch3',\n \"22436248\":'Ch4',\n \"22594549\":'Ch5',\n \"22594548\":'Ch6'}\n\n\n# this can't be a nice and simple dictionary because people may want to use\n# this info in the other direction\n\nCAM2CH_list = [('22956818', 'Ch1', 'Hydra01'), # Hydra01\n ('22956816', 'Ch2', 'Hydra01'),\n ('22956813', 'Ch3', 'Hydra01'),\n ('22956805', 'Ch4', 'Hydra01'),\n ('22956807', 'Ch5', 'Hydra01'),\n ('22956832', 'Ch6', 'Hydra01'),\n ('22956839', 'Ch1', 'Hydra02'), # Hydra02\n ('22956837', 'Ch2', 'Hydra02'),\n ('22956836', 'Ch3', 'Hydra02'),\n ('22956829', 'Ch4', 'Hydra02'),\n ('22956822', 'Ch5', 'Hydra02'),\n ('22956806', 'Ch6', 'Hydra02'),\n ('22956814', 'Ch1', 'Hydra03'), # Hydra03\n ('22956833', 'Ch2', 'Hydra03'),\n 
('22956819', 'Ch3', 'Hydra03'),\n ('22956827', 'Ch4', 'Hydra03'),\n ('22956823', 'Ch5', 'Hydra03'),\n ('22956840', 'Ch6', 'Hydra03'),\n ('22956812', 'Ch1', 'Hydra04'), # Hydra04\n ('22956834', 'Ch2', 'Hydra04'),\n ('22956817', 'Ch3', 'Hydra04'),\n ('22956811', 'Ch4', 'Hydra04'),\n ('22956831', 'Ch5', 'Hydra04'),\n ('22956809', 'Ch6', 'Hydra04'),\n ('22594559', 'Ch1', 'Hydra05'), # Hydra05\n ('22594547', 'Ch2', 'Hydra05'),\n ('22594546', 'Ch3', 'Hydra05'),\n ('22436248', 'Ch4', 'Hydra05'),\n ('22594549', 'Ch5', 'Hydra05'),\n ('22594548', 'Ch6', 'Hydra05')]\n\nCAM2CH_df = pd.DataFrame(CAM2CH_list,\n columns=['camera_serial', 'channel', 'rig'])\n\n\n# dictionaries to go from channel/(col, row) to well name.\n# there will be many as it depends on total number of wells, upright/upsidedown,\n# and in case of the 48wp how many wells in the fov\n\nUPRIGHT_48WP_669999 = pd.DataFrame.from_dict({ ('Ch1',0):['A1','B1','C1'],\n ('Ch1',1):['A2','B2','C2'],\n ('Ch2',0):['D1','E1','F1'],\n ('Ch2',1):['D2','E2','F2'],\n ('Ch3',0):['A3','B3','C3'],\n ('Ch3',1):['A4','B4','C4'],\n ('Ch3',2):['A5','B5','C5'],\n ('Ch4',0):['D3','E3','F3'],\n ('Ch4',1):['D4','E4','F4'],\n ('Ch4',2):['D5','E5','F5'],\n ('Ch5',0):['A6','B6','C6'],\n ('Ch5',1):['A7','B7','C7'],\n ('Ch5',2):['A8','B8','C8'],\n ('Ch6',0):['D6','E6','F6'],\n ('Ch6',1):['D7','E7','F7'],\n ('Ch6',2):['D8','E8','F8']})\n\nUPRIGHT_96WP = pd.DataFrame.from_dict({('Ch1',0):[ 'A1', 'B1', 'C1', 'D1'],\n ('Ch1',1):[ 'A2', 'B2', 'C2', 'D2'],\n ('Ch1',2):[ 'A3', 'B3', 'C3', 'D3'],\n ('Ch1',3):[ 'A4', 'B4', 'C4', 'D4'],\n ('Ch2',0):[ 'E1', 'F1', 'G1', 'H1'],\n ('Ch2',1):[ 'E2', 'F2', 'G2', 'H2'],\n ('Ch2',2):[ 'E3', 'F3', 'G3', 'H3'],\n ('Ch2',3):[ 'E4', 'F4', 'G4', 'H4'],\n ('Ch3',0):[ 'A5', 'B5', 'C5', 'D5'],\n ('Ch3',1):[ 'A6', 'B6', 'C6', 'D6'],\n ('Ch3',2):[ 'A7', 'B7', 'C7', 'D7'],\n ('Ch3',3):[ 'A8', 'B8', 'C8', 'D8'],\n ('Ch4',0):[ 'E5', 'F5', 'G5', 'H5'],\n ('Ch4',1):[ 'E6', 'F6', 'G6', 'H6'],\n ('Ch4',2):[ 'E7', 'F7', 'G7', 
'H7'],\n ('Ch4',3):[ 'E8', 'F8', 'G8', 'H8'],\n ('Ch5',0):[ 'A9', 'B9', 'C9', 'D9'],\n ('Ch5',1):['A10','B10','C10','D10'],\n ('Ch5',2):['A11','B11','C11','D11'],\n ('Ch5',3):['A12','B12','C12','D12'],\n ('Ch6',0):[ 'E9', 'F9', 'G9', 'H9'],\n ('Ch6',1):['E10','F10','G10','H10'],\n ('Ch6',2):['E11','F11','G11','H11'],\n ('Ch6',3):['E12','F12','G12','H12']})\n\n#%% functions\n\n\ndef get_mwp_map(total_n_wells, whichsideup):\n \"\"\"\n Given a total number of wells, and whether the multiwell plate\n is upright or upside-down, returns a dataframe with the correct\n channel/row/column -> well_name mapping\n (this works on the Hydra imaging systems - by LoopBio Gmbh - used in Andre\n Brown's lab)\n \"\"\"\n if total_n_wells==48 and whichsideup=='upright':\n return UPRIGHT_48WP_669999\n elif total_n_wells==96 and whichsideup=='upright':\n return UPRIGHT_96WP\n else:\n raise ValueError('This case has not been coded yet. ' + \\\n 'Please contact the devs or open a feature request on GitHub.')\n\n\ndef serial2rigchannel(camera_serial):\n \"\"\"\n Takes camera serial number, returns a (rig, channel) tuple\n \"\"\"\n out = CAM2CH_df[CAM2CH_df['camera_serial']==camera_serial]\n if len(out) == 0:\n raise ValueError('{} unknown as camera serial string'.format(camera_serial))\n elif len(out) == 1:\n return tuple(out[['rig','channel']].values[0])\n else:\n raise Exception('Multiple hits for {}. 
split_fov/helper.py corrupted?'.format(camera_serial))\n\n\ndef serial2channel(camera_serial):\n \"\"\"\n Takes camera serial number, returns the channel\n \"\"\"\n return serial2rigchannel(camera_serial)[1]\n\n\n\ndef parse_camera_serial(filename):\n import re\n regex = r\"(?<=20\\d{6}\\_\\d{6}\\.)\\d{8}\"\n camera_serial = re.findall(regex, str(filename).lower())[0]\n return camera_serial\n\n\ndef get_bgnd_from_masked(masked_image_file, is_use_existing=False):\n \"\"\"\n - Opens the masked_image_file hdf5 file, reads the /full_data node and\n creates a \"background\" by taking the maximum value of each pixel over time.\n - if is_use_existing, read instead the /bgnd field\n (and if /bgnd not there, fall back to method above)\n - Parses the file name to find a camera serial number\n - reads the pixel/um ratio from the masked_image_file\n \"\"\"\n import numpy as np\n from tierpsy.helper.params import read_unit_conversions\n\n # read attributes of masked_image_file\n _, (microns_per_pixel, xy_units) , is_light_background = read_unit_conversions(masked_image_file)\n # get \"background\" and px2um\n with pd.HDFStore(masked_image_file, 'r') as fid:\n assert is_light_background, \\\n 'MultiWell recognition is only available for brightfield at the moment'\n if is_use_existing and '/bgnd' in fid:\n print('bgnd found :) ')\n img = fid.get_node('/bgnd').read()\n else:\n img = np.max(fid.get_node('/full_data'), axis=0)\n\n camera_serial = parse_camera_serial(masked_image_file)\n\n return img, camera_serial, microns_per_pixel\n\n\ndef make_square_template(n_pxls=150, rel_width=0.8, blurring=0.1, dtype_out='float'):\n import numpy as np\n \"\"\"Function that creates a template that approximates a square well\"\"\"\n n_pxls = int(np.round(n_pxls))\n x = np.linspace(-0.5, 0.5, n_pxls)\n y = np.linspace(-0.5, 0.5, n_pxls)\n xx, yy = np.meshgrid(x, y, sparse=False, indexing='ij')\n\n # inspired by Mark Shattuck's function to make a colloid's template\n zz = (1 - np.tanh( 
(abs(xx)-rel_width/2)/blurring ))\n zz = zz * (1-np.tanh( (abs(yy)-rel_width/2)/blurring ))\n zz = zz/4\n\n # add bright border\n edge = int(0.05 * n_pxls)\n zz[:edge,:] = 1\n zz[-edge:,:] = 1\n zz[:,:edge] = 1\n zz[:,-edge:] = 1\n\n if dtype_out == 'uint8':\n zz *= 255\n zz = zz.astype(np.uint8)\n elif dtype_out == 'float':\n pass\n else:\n raise ValueError(\"Only 'float' and 'uint8' are valid dtypes for this\")\n\n\n return zz\n\n\n# def was_fov_split(timeseries_data):\n# \"\"\"\n# Check if the FOV was split, looking at timeseries_data\n# \"\"\"\n# if 'well_name' not in timeseries_data.columns:\n# # for some weird reason, save_feats_stats is being called on an old\n# # featuresN file without calling save_timeseries_feats_table first\n# is_fov_split = False\n# else:\n# # timeseries_data has been updated and now has a well_name column\n# if len(set(timeseries_data['well_name']) - set(['n/a'])) > 0:\n# is_fov_split = True\n# # print('have to split fov by well')\n# else:\n# assert all(timeseries_data['well_name']=='n/a'), \\\n# 'Something is wrong with well naming - go check save_feats_stats'\n# is_fov_split = False\n# return is_fov_split\n\ndef was_fov_split(fname):\n with pd.HDFStore(fname, 'r') as fid:\n is_fov_tosplit = ('/fov_wells' in fid)\n return is_fov_tosplit\n\n\ndef naive_normalise(img):\n m = img.min()\n M = img.max()\n return (img - m) / (M-m)\n\n\ndef fft_convolve2d(x,y):\n \"\"\" 2D convolution, using FFT\"\"\"\n fr = fft2(x)\n fr2 = fft2(y)\n cc = np.real(ifft2(fr*fr2))\n cc = fftshift(cc)\n return cc\n\n\ndef simulate_wells_lattice(img_shape, x_off, y_off, sp, nwells=None, template_shape='square'):\n \"\"\"\n Create mock fov by placing well templates onto a square lattice\n Very simply uses the input parameters and range to define where the wells\n will go, and then places the template in a padded canvas.\n The canvas is then cut to be of img_shape again.\n This simple approach works because the template is created to be exactly\n spacing large, so 
templates do not overlap\n \"\"\"\n\n # convert fractions into integers\n x_offset = int(x_off*img_shape[0])\n y_offset = int(y_off*img_shape[0])\n spacing = int(sp*img_shape[0])\n\n # create a padded empty canvas\n padding = img_shape[0]//2\n padding_times_2 = padding*2\n padded_shape = tuple(s+padding_times_2 for s in img_shape)\n padded_canvas = np.zeros(padded_shape)\n\n # determine where the wells wil go in the padded canvas\n if nwells is not None:\n r_wells = range(y_offset+padding,\n y_offset+padding+nwells*spacing,\n spacing)\n c_wells = range(x_offset+padding,\n x_offset+padding+nwells*spacing,\n spacing)\n else:\n r_wells = range(y_offset+padding,\n padding+img_shape[0],\n spacing)\n c_wells = range(x_offset+padding,\n padding+img_shape[1],\n spacing)\n tmpl_pos_in_padded_canvas = [(r,c) for r in r_wells for c in c_wells]\n\n # make the template for the wells\n tmpl = make_square_template(n_pxls=spacing,\n rel_width=0.7,\n blurring=0.1,\n dtype_out='float')\n # invert\n tmpl = 1-tmpl\n\n # place wells onto canvas\n ts = tmpl.shape[0]\n for r,c in tmpl_pos_in_padded_canvas:\n try:\n padded_canvas[r-ts//2:r-(-ts//2),\n c-ts//2:c-(-ts//2)] += tmpl\n except Exception as e:\n print(str(e))\n import pdb\n pdb.set_trace()\n\n cutout_canvas = padded_canvas[padding:padding+img_shape[0],\n padding:padding+img_shape[1]]\n cutout_canvas = naive_normalise(cutout_canvas)\n\n return cutout_canvas\n\n\ndef get_well_color(is_good_well, forCV=False):\n colors = {'undefined': (255, 127, 0),\n 'good_well': (77, 220, 74),\n 'bad_well': (255, 0, 0)}\n if np.isnan(is_good_well) or is_good_well==-1:\n color = colors['undefined']\n elif is_good_well == True or is_good_well==1:\n color = colors['good_well']\n elif is_good_well == False or is_good_well==0:\n color = colors['bad_well']\n else:\n print('is_good_well not NaN, True, False, -1, 1, 0. 
Debugging:')\n import pdb; pdb.set_trace()\n if not forCV:\n color = tuple(c/255.0 for c in color)\n return color\n\n\nif __name__ == '__main__':\n\n # test that camera serials return the correct channel\n serials_list = [line[0] for line in CAM2CH_list]\n# serials_list.append('22594540') # this raise an exception as it does not exist\n for serial in serials_list:\n print('{} -> {}'.format(serial, serial2channel(serial)))\n # that works as intended!\n\n # let's now check that the camera name is parsed correctly I guess\n from pathlib import Path\n src_dir = Path('/Users/lferiani/Desktop/Data_FOVsplitter/evgeny/MaskedVideos/20190808')\n masked_fnames = src_dir.rglob('*.hdf5')\n for fname in masked_fnames:\n camera_serial = parse_camera_serial(fname)\n print(fname)\n print(camera_serial)\n print(serial2channel(camera_serial))\n print(' ')\n # this too works perfectly... but I saw wrong data was written in the masked videos\n # so have to check what went wrong there\n"
] | [
[
"numpy.fft.fft2",
"numpy.fft.ifft2",
"numpy.linspace",
"numpy.isnan",
"pandas.DataFrame",
"numpy.fft.fftshift",
"numpy.round",
"pandas.HDFStore",
"pandas.DataFrame.from_dict",
"numpy.meshgrid",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
isabella232/agents | [
"b2ed02d20c43a4b789a4711f4653e8421f8ba526"
] | [
"tf_agents/experimental/examples/ppo/schulman17/train_eval_lib.py"
] | [
"# coding=utf-8\n# Copyright 2020 The TF-Agents Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Lint as: python3\nr\"\"\"Train/eval PPOClipAgent in Mujoco environments with (Schulman, 17) methods.\n\nTo reproduce (Schulman, 17), here we collect fixed length sequences of\n`collect_sequence_length` to store in the replay buffer and perform advantage\ncalculation on. Each sequence can span multiple episodes, separated by a\nboundary step. Bootstraping occurs during advantage calculation for the partial\nepisodes that are part of the sequences.\n\nNote that this isn't necessary. Alternatively, you could instead collect a\nspecified number of episodes in each training iteration. Each `item` stored in\nReverb tables is a full episode, and advantage calculation happens on full\nepisodes. 
As a result, no bootstrapping is required.\n\nAll hyperparameters come from the PPO paper\nhttps://arxiv.org/abs/1707.06347.pdf\n\"\"\"\nimport os\n\nfrom absl import logging\n\nimport gin\nimport reverb\nimport tensorflow.compat.v2 as tf\n\nfrom tf_agents.agents.ppo import ppo_clip_agent\nfrom tf_agents.environments import suite_mujoco\nfrom tf_agents.experimental.examples.ppo import ppo_learner\nfrom tf_agents.metrics import py_metrics\nfrom tf_agents.networks import actor_distribution_network\nfrom tf_agents.networks import value_network\nfrom tf_agents.policies import py_tf_eager_policy\nfrom tf_agents.replay_buffers import reverb_replay_buffer\nfrom tf_agents.replay_buffers import reverb_utils\nfrom tf_agents.train import actor\nfrom tf_agents.train import learner\nfrom tf_agents.train import triggers\nfrom tf_agents.train.utils import spec_utils\nfrom tf_agents.train.utils import train_utils\n\n\nclass ReverbFixedLengthSequenceObserver(\n reverb_utils.ReverbAddTrajectoryObserver):\n \"\"\"Reverb fixed length sequence observer.\n\n This is a specialized observer similar to ReverbAddTrajectoryObserver but each\n sequence contains a fixed number of steps and can span multiple episodes. This\n implementation is consistent with (Schulman, 17).\n\n **Note**: Counting of steps in drivers does not include boundary steps. To\n guarantee only 1 item is pushed to the replay when collecting n steps with a\n `sequence_length` of n make sure to set the `stride_length`.\n \"\"\"\n\n def __call__(self, trajectory):\n \"\"\"Writes the trajectory into the underlying replay buffer.\n\n Allows trajectory to be a flattened trajectory. No batch dimension allowed.\n\n Args:\n trajectory: The trajectory to be written which could be (possibly nested)\n trajectory object or a flattened version of a trajectory. 
It assumes\n there is *no* batch dimension.\n \"\"\"\n self._writer.append(trajectory)\n self._cached_steps += 1\n\n self._write_cached_steps()\n\n\[email protected]\ndef train_eval(\n root_dir,\n env_name='HalfCheetah-v2',\n # Training params\n num_iterations=1600,\n actor_fc_layers=(64, 64),\n value_fc_layers=(64, 64),\n learning_rate=3e-4,\n collect_sequence_length=2048,\n minibatch_size=64,\n num_epochs=10,\n # Agent params\n importance_ratio_clipping=0.2,\n lambda_value=0.95,\n discount_factor=0.99,\n entropy_regularization=0.,\n value_pred_loss_coef=0.5,\n use_gae=True,\n use_td_lambda_return=True,\n gradient_clipping=0.5,\n value_clipping=None,\n # Replay params\n reverb_port=None,\n replay_capacity=10000,\n # Others\n policy_save_interval=5000,\n summary_interval=1000,\n eval_interval=10000,\n eval_episodes=100,\n debug_summaries=False,\n summarize_grads_and_vars=False):\n \"\"\"Trains and evaluates PPO (Importance Ratio Clipping).\n\n Args:\n root_dir: Main directory path where checkpoints, saved_models, and summaries\n will be written to.\n env_name: Name for the Mujoco environment to load.\n num_iterations: The number of iterations to perform collection and training.\n actor_fc_layers: List of fully_connected parameters for the actor network,\n where each item is the number of units in the layer.\n value_fc_layers: : List of fully_connected parameters for the value network,\n where each item is the number of units in the layer.\n learning_rate: Learning rate used on the Adam optimizer.\n collect_sequence_length: Number of steps to take in each collect run.\n minibatch_size: Number of elements in each mini batch. If `None`, the entire\n collected sequence will be treated as one batch.\n num_epochs: Number of iterations to repeat over all collected data per data\n collection step. (Schulman,2017) sets this to 10 for Mujoco, 15 for\n Roboschool and 3 for Atari.\n importance_ratio_clipping: Epsilon in clipped, surrogate PPO objective. 
For\n more detail, see explanation at the top of the doc.\n lambda_value: Lambda parameter for TD-lambda computation.\n discount_factor: Discount factor for return computation. Default to `0.99`\n which is the value used for all environments from (Schulman, 2017).\n entropy_regularization: Coefficient for entropy regularization loss term.\n Default to `0.0` because no entropy bonus was used in (Schulman, 2017).\n value_pred_loss_coef: Multiplier for value prediction loss to balance with\n policy gradient loss. Default to `0.5`, which was used for all\n environments in the OpenAI baseline implementation. This parameters is\n irrelevant unless you are sharing part of actor_net and value_net. In that\n case, you would want to tune this coeeficient, whose value depends on the\n network architecture of your choice.\n use_gae: If True (default False), uses generalized advantage estimation for\n computing per-timestep advantage. Else, just subtracts value predictions\n from empirical return.\n use_td_lambda_return: If True (default False), uses td_lambda_return for\n training value function; here: `td_lambda_return = gae_advantage +\n value_predictions`. `use_gae` must be set to `True` as well to enable TD\n -lambda returns. If `use_td_lambda_return` is set to True while\n `use_gae` is False, the empirical return will be used and a warning will\n be logged.\n gradient_clipping: Norm length to clip gradients.\n value_clipping: Difference between new and old value predictions are clipped\n to this threshold. Value clipping could be helpful when training\n very deep networks. Default: no clipping.\n reverb_port: Port for reverb server, if None, use a randomly chosen unused\n port.\n replay_capacity: The maximum number of elements for the replay buffer. 
Items\n will be wasted if this is smalled than collect_sequence_length.\n policy_save_interval: How often, in train_steps, the policy will be saved.\n summary_interval: How often to write data into Tensorboard.\n eval_interval: How often to run evaluation, in train_steps.\n eval_episodes: Number of episodes to evaluate over.\n debug_summaries: Boolean for whether to gather debug summaries.\n summarize_grads_and_vars: If true, gradient summaries will be written.\n \"\"\"\n collect_env = suite_mujoco.load(env_name)\n eval_env = suite_mujoco.load(env_name)\n num_environments = 1\n\n observation_tensor_spec, action_tensor_spec, time_step_tensor_spec = (\n spec_utils.get_tensor_specs(collect_env))\n # TODO(b/172267869): Remove this conversion once TensorNormalizer stops\n # converting float64 inputs to float32.\n observation_tensor_spec = tf.TensorSpec(\n dtype=tf.float32, shape=observation_tensor_spec.shape)\n\n train_step = train_utils.create_train_step()\n\n actor_net = actor_distribution_network.ActorDistributionNetwork(\n observation_tensor_spec,\n action_tensor_spec,\n fc_layer_params=actor_fc_layers,\n activation_fn=tf.nn.tanh,\n kernel_initializer=tf.keras.initializers.Orthogonal())\n value_net = value_network.ValueNetwork(\n observation_tensor_spec,\n fc_layer_params=value_fc_layers,\n kernel_initializer=tf.keras.initializers.Orthogonal())\n\n current_iteration = tf.Variable(0, dtype=tf.int64)\n def learning_rate_fn():\n # Linearly decay the learning rate.\n return learning_rate * (1 - current_iteration / num_iterations)\n\n agent = ppo_clip_agent.PPOClipAgent(\n time_step_tensor_spec,\n action_tensor_spec,\n optimizer=tf.compat.v1.train.AdamOptimizer(\n learning_rate=learning_rate_fn, epsilon=1e-5),\n actor_net=actor_net,\n value_net=value_net,\n importance_ratio_clipping=importance_ratio_clipping,\n lambda_value=lambda_value,\n discount_factor=discount_factor,\n entropy_regularization=entropy_regularization,\n value_pred_loss_coef=value_pred_loss_coef,\n # 
This is a legacy argument for the number of times we repeat the data\n # inside of the train function, incompatible with mini batch learning.\n # We set the epoch number from the replay buffer and tf.Data instead.\n num_epochs=1,\n use_gae=use_gae,\n use_td_lambda_return=use_td_lambda_return,\n gradient_clipping=gradient_clipping,\n value_clipping=value_clipping,\n # TODO(b/150244758): Default compute_value_and_advantage_in_train to False\n # after Reverb open source.\n compute_value_and_advantage_in_train=False,\n # Skips updating normalizers in the agent, as it's handled in the learner.\n update_normalizers_in_train=False,\n debug_summaries=debug_summaries,\n summarize_grads_and_vars=summarize_grads_and_vars,\n train_step_counter=train_step)\n agent.initialize()\n\n reverb_server = reverb.Server(\n [\n reverb.Table( # Replay buffer storing experience for training.\n name='training_table',\n sampler=reverb.selectors.Fifo(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1),\n max_size=replay_capacity,\n max_times_sampled=1,\n ),\n reverb.Table( # Replay buffer storing experience for normalization.\n name='normalization_table',\n sampler=reverb.selectors.Fifo(),\n remover=reverb.selectors.Fifo(),\n rate_limiter=reverb.rate_limiters.MinSize(1),\n max_size=replay_capacity,\n max_times_sampled=1,\n )\n ],\n port=reverb_port)\n\n # Create the replay buffer.\n reverb_replay_train = reverb_replay_buffer.ReverbReplayBuffer(\n agent.collect_data_spec,\n sequence_length=collect_sequence_length,\n table_name='training_table',\n server_address='localhost:{}'.format(reverb_server.port),\n # The only collected sequence is used to populate the batches.\n max_cycle_length=1,\n rate_limiter_timeout_ms=1000)\n reverb_replay_normalization = reverb_replay_buffer.ReverbReplayBuffer(\n agent.collect_data_spec,\n sequence_length=collect_sequence_length,\n table_name='normalization_table',\n server_address='localhost:{}'.format(reverb_server.port),\n # The 
only collected sequence is used to populate the batches.\n max_cycle_length=1,\n rate_limiter_timeout_ms=1000)\n\n rb_observer = ReverbFixedLengthSequenceObserver(\n reverb_replay_train.py_client, ['training_table', 'normalization_table'],\n sequence_length=collect_sequence_length,\n stride_length=collect_sequence_length)\n\n saved_model_dir = os.path.join(root_dir, learner.POLICY_SAVED_MODEL_DIR)\n collect_env_step_metric = py_metrics.EnvironmentSteps()\n learning_triggers = [\n triggers.PolicySavedModelTrigger(\n saved_model_dir,\n agent,\n train_step,\n interval=policy_save_interval,\n metadata_metrics={\n triggers.ENV_STEP_METADATA_KEY: collect_env_step_metric\n }),\n triggers.StepPerSecondLogTrigger(train_step, interval=summary_interval),\n ]\n\n def training_dataset_fn():\n return reverb_replay_train.as_dataset(\n sample_batch_size=num_environments,\n sequence_preprocess_fn=agent.preprocess_sequence)\n\n def normalization_dataset_fn():\n return reverb_replay_normalization.as_dataset(\n sample_batch_size=num_environments,\n sequence_preprocess_fn=agent.preprocess_sequence)\n\n agent_learner = ppo_learner.PPOLearner(\n root_dir,\n train_step,\n agent,\n experience_dataset_fn=training_dataset_fn,\n normalization_dataset_fn=normalization_dataset_fn,\n num_batches=1,\n num_epochs=num_epochs,\n minibatch_size=minibatch_size,\n shuffle_buffer_size=collect_sequence_length,\n triggers=learning_triggers)\n\n tf_collect_policy = agent.collect_policy\n collect_policy = py_tf_eager_policy.PyTFEagerPolicy(\n tf_collect_policy, use_tf_function=True)\n\n collect_actor = actor.Actor(\n collect_env,\n collect_policy,\n train_step,\n steps_per_run=collect_sequence_length,\n observers=[rb_observer],\n metrics=actor.collect_metrics(buffer_size=10) + [collect_env_step_metric],\n reference_metrics=[collect_env_step_metric],\n summary_dir=os.path.join(root_dir, learner.TRAIN_DIR),\n summary_interval=summary_interval)\n\n eval_greedy_policy = py_tf_eager_policy.PyTFEagerPolicy(\n 
agent.policy, use_tf_function=True)\n\n if eval_interval:\n logging.info('Intial evaluation.')\n eval_actor = actor.Actor(\n eval_env,\n eval_greedy_policy,\n train_step,\n metrics=actor.eval_metrics(eval_episodes),\n reference_metrics=[collect_env_step_metric],\n summary_dir=os.path.join(root_dir, 'eval'),\n episodes_per_run=eval_episodes)\n\n eval_actor.run_and_log()\n\n logging.info('Training on %s', env_name)\n last_eval_step = 0\n for i in range(num_iterations):\n collect_actor.run()\n # TODO(b/159615593): Update to use observer.flush.\n # Reset the reverb observer to make sure the data collected is flushed and\n # written to the RB.\n rb_observer.reset()\n agent_learner.run()\n reverb_replay_train.clear()\n reverb_replay_normalization.clear()\n current_iteration.assign_add(1)\n\n # Eval only if `eval_interval` has been set. Then, eval if the current train\n # step is equal or greater than the `last_eval_step` + `eval_interval` or if\n # this is the last iteration. This logic exists because agent_learner.run()\n # does not return after every train step.\n if (eval_interval and\n (agent_learner.train_step_numpy >= eval_interval + last_eval_step\n or i == num_iterations - 1)):\n logging.info('Evaluating.')\n eval_actor.run_and_log()\n last_eval_step = agent_learner.train_step_numpy\n\n rb_observer.close()\n reverb_server.stop()\n"
] | [
[
"tensorflow.compat.v2.keras.initializers.Orthogonal",
"tensorflow.compat.v2.Variable",
"tensorflow.compat.v2.compat.v1.train.AdamOptimizer",
"tensorflow.compat.v2.TensorSpec"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Gusb3ll/tsukiuncen | [
"2ae6bcac3cd3c8f224d179299ef46afd9ec87b24"
] | [
"Modules/HentAI/mrcnn/visualize.py"
] | [
"\"\"\"\nMask R-CNN\nDisplay and Visualization Functions.\n\nCopyright (c) 2017 Matterport, Inc.\nLicensed under the MIT License (see LICENSE for details)\nWritten by Waleed Abdulla\n\"\"\"\n\nimport os\nimport sys\nimport random\nimport itertools\nimport colorsys\n\nimport numpy as np\nfrom skimage.measure import find_contours\nimport matplotlib.pyplot as plt\nfrom matplotlib import patches, lines\nfrom matplotlib.patches import Polygon\nimport IPython.display\n\n# Root directory of the project\nROOT_DIR = os.path.abspath(\"../\")\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn import utils\n\n\n############################################################\n# Visualization\n############################################################\n\ndef display_images(images, titles=None, cols=4, cmap=None, norm=None, interpolation=None):\n \"\"\"Display the given set of images, optionally with titles.\n images: list or array of image tensors in HWC format.\n titles: optional. A list of titles to display with each image.\n cols: number of images per row\n cmap: Optional. Color map to use. For example, \"Blues\".\n norm: Optional. A Normalize instance to map values to colors.\n interpolation: Optional. 
Image interpolation to use for display.\n \"\"\"\n titles = titles if titles is not None else [\"\"] * len(images)\n rows = len(images) // cols + 1\n plt.figure(figsize=(14, 14 * rows // cols))\n i = 1\n for image, title in zip(images, titles):\n plt.subplot(rows, cols, i)\n plt.title(title, fontsize=9)\n plt.axis('off')\n plt.imshow(image.astype(np.uint8), cmap=cmap, norm=norm, interpolation=interpolation)\n i += 1\n plt.show()\n\n\ndef random_colors(N, bright=True):\n \"\"\"\n Generate random colors.\n To get visually distinct colors, generate them in HSV space then\n convert to RGB.\n \"\"\"\n brightness = 1.0 if bright else 0.7\n hsv = [(i / N, 1, brightness) for i in range(N)]\n colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))\n random.shuffle(colors)\n return colors\n\n\ndef apply_mask(image, mask, color, alpha=0.5):\n \"\"\"Apply the given mask to the image.\n \"\"\"\n for c in range(3):\n image[:, :, c] = np.where(mask == 1, image[:, :, c] * (1 - alpha) + alpha * color[c] * 255, image[:, :, c])\n return image\n\n\ndef display_instances(image, boxes, masks, class_ids, class_names, scores=None, title=\"\", figsize=(16, 16), ax=None, show_mask=True, show_bbox=True, colors=None, captions=None):\n \"\"\"\n boxes: [num_instance, (y1, x1, y2, x2, class_id)] in image coordinates.\n masks: [height, width, num_instances]\n class_ids: [num_instances]\n class_names: list of class names of the dataset\n scores: (optional) confidence scores for each box\n title: (optional) Figure title\n show_mask, show_bbox: To show masks and bounding boxes or not\n figsize: (optional) the size of the image\n colors: (optional) An array or colors to use with each object\n captions: (optional) A list of strings to use as captions for each object\n \"\"\"\n # Number of instances\n N = boxes.shape[0]\n if not N:\n print(\"\\n*** No instances to display *** \\n\")\n else:\n assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]\n\n # If no axis is passed, create one and 
automatically call show()\n auto_show = False\n if not ax:\n _, ax = plt.subplots(1, figsize=figsize)\n auto_show = True\n\n # Generate random colors\n colors = colors or random_colors(N)\n\n # Show area outside image boundaries.\n height, width = image.shape[:2]\n ax.set_ylim(height + 10, -10)\n ax.set_xlim(-10, width + 10)\n ax.axis('off')\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n color = colors[i]\n\n # Bounding box\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. Likely lost in image cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n if show_bbox:\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2,\n alpha=0.7, linestyle=\"dashed\",\n edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Label\n if not captions:\n class_id = class_ids[i]\n score = scores[i] if scores is not None else None\n label = class_names[class_id]\n caption = \"{} {:.3f}\".format(label, score) if score else label\n else:\n caption = captions[i]\n ax.text(x1, y1 + 8, caption,\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n mask = masks[:, :, i]\n if show_mask:\n masked_image = apply_mask(masked_image, mask, color)\n\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n if auto_show:\n plt.show()\n\n\ndef display_differences(image,\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n class_names, title=\"\", ax=None,\n show_mask=True, show_box=True,\n iou_threshold=0.5, score_threshold=0.5):\n \"\"\"Display ground truth and prediction instances on 
the same image.\"\"\"\n # Match predictions to ground truth\n gt_match, pred_match, overlaps = utils.compute_matches(\n gt_box, gt_class_id, gt_mask,\n pred_box, pred_class_id, pred_score, pred_mask,\n iou_threshold=iou_threshold, score_threshold=score_threshold)\n # Ground truth = green. Predictions = red\n colors = [(0, 1, 0, .8)] * len(gt_match)\\\n + [(1, 0, 0, 1)] * len(pred_match)\n # Concatenate GT and predictions\n class_ids = np.concatenate([gt_class_id, pred_class_id])\n scores = np.concatenate([np.zeros([len(gt_match)]), pred_score])\n boxes = np.concatenate([gt_box, pred_box])\n masks = np.concatenate([gt_mask, pred_mask], axis=-1)\n # Captions per instance show score/IoU\n captions = [\"\" for m in gt_match] + [\"{:.2f} / {:.2f}\".format(\n pred_score[i],\n (overlaps[i, int(pred_match[i])]\n if pred_match[i] > -1 else overlaps[i].max()))\n for i in range(len(pred_match))]\n # Set title if not provided\n title = title or \"Ground Truth and Detections\\n GT=green, pred=red, captions: score/IoU\"\n # Display\n display_instances(\n image,\n boxes, masks, class_ids,\n class_names, scores, ax=ax,\n show_bbox=show_box, show_mask=show_mask,\n colors=colors, captions=captions,\n title=title)\n\n\ndef draw_rois(image, rois, refined_rois, mask, class_ids, class_names, limit=10):\n \"\"\"\n anchors: [n, (y1, x1, y2, x2)] list of anchors in image coordinates.\n proposals: [n, 4] the same anchors but refined to fit objects better.\n \"\"\"\n masked_image = image.copy()\n\n # Pick random anchors in case there are too many.\n ids = np.arange(rois.shape[0], dtype=np.int32)\n ids = np.random.choice(\n ids, limit, replace=False) if ids.shape[0] > limit else ids\n\n fig, ax = plt.subplots(1, figsize=(12, 12))\n if rois.shape[0] > limit:\n plt.title(\"Showing {} random ROIs out of {}\".format(\n len(ids), rois.shape[0]))\n else:\n plt.title(\"{} ROIs\".format(len(ids)))\n\n # Show area outside image boundaries.\n ax.set_ylim(image.shape[0] + 20, -20)\n ax.set_xlim(-50, 
image.shape[1] + 20)\n ax.axis('off')\n\n for i, id in enumerate(ids):\n color = np.random.rand(3)\n class_id = class_ids[id]\n # ROI\n y1, x1, y2, x2 = rois[id]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, edgecolor=color if class_id else \"gray\", facecolor='none', linestyle=\"dashed\")\n ax.add_patch(p)\n # Refined ROI\n if class_id:\n ry1, rx1, ry2, rx2 = refined_rois[id]\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal for easy visualization\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Label\n label = class_names[class_id]\n ax.text(rx1, ry1 + 8, \"{}\".format(label),\n color='w', size=11, backgroundcolor=\"none\")\n\n # Mask\n m = utils.unmold_mask(mask[id], rois[id] [:4].astype(np.int32), image.shape)\n masked_image = apply_mask(masked_image, m, color)\n\n ax.imshow(masked_image)\n\n # Print stats\n print(\"Positive ROIs: \", class_ids[class_ids > 0].shape[0])\n print(\"Negative ROIs: \", class_ids[class_ids == 0].shape[0])\n print(\"Positive Ratio: {:.2f}\".format(\n class_ids[class_ids > 0].shape[0] / class_ids.shape[0]))\n\n\ndef draw_box(image, box, color):\n \"\"\"Draw 3-pixel width bounding boxes on the given image array.\n color: list of 3 int values for RGB.\n \"\"\"\n y1, x1, y2, x2 = box\n image[y1:y1 + 2, x1:x2] = color\n image[y2:y2 + 2, x1:x2] = color\n image[y1:y2, x1:x1 + 2] = color\n image[y1:y2, x2:x2 + 2] = color\n return image\n\n\ndef display_top_masks(image, mask, class_ids, class_names, limit=4):\n \"\"\"Display the given image and the top few class masks.\"\"\"\n to_display = []\n titles = []\n to_display.append(image)\n titles.append(\"H x W={}x{}\".format(image.shape[0], image.shape[1]))\n # Pick top prominent classes in this image\n unique_class_ids = np.unique(class_ids)\n mask_area = [np.sum(mask[:, :, np.where(class_ids == i)[0]]) for i in 
unique_class_ids]\n top_ids = [v[0] for v in sorted(zip(unique_class_ids, mask_area),\n key=lambda r: r[1], reverse=True) if v[1] > 0]\n # Generate images and titles\n for i in range(limit):\n class_id = top_ids[i] if i < len(top_ids) else -1\n # Pull masks of instances belonging to the same class.\n m = mask[:, :, np.where(class_ids == class_id)[0]]\n m = np.sum(m * np.arange(1, m.shape[-1] + 1), -1)\n to_display.append(m)\n titles.append(class_names[class_id] if class_id != -1 else \"-\")\n display_images(to_display, titles=titles, cols=limit + 1, cmap=\"Blues_r\")\n\n\ndef plot_precision_recall(AP, precisions, recalls):\n \"\"\"Draw the precision-recall curve.\n\n AP: Average precision at IoU >= 0.5\n precisions: list of precision values\n recalls: list of recall values\n \"\"\"\n # Plot the Precision-Recall curve\n _, ax = plt.subplots(1)\n ax.set_title(\"Precision-Recall Curve. AP@50 = {:.3f}\".format(AP))\n ax.set_ylim(0, 1.1)\n ax.set_xlim(0, 1.1)\n _ = ax.plot(recalls, precisions)\n\n\ndef plot_overlaps(gt_class_ids, pred_class_ids, pred_scores, overlaps, class_names, threshold=0.5):\n \"\"\"Draw a grid showing how ground truth objects are classified.\n gt_class_ids: [N] int. Ground truth class IDs\n pred_class_id: [N] int. Predicted class IDs\n pred_scores: [N] float. The probability scores of predicted classes\n overlaps: [pred_boxes, gt_boxes] IoU overlaps of predictions and GT boxes.\n class_names: list of all class names in the dataset\n threshold: Float. 
The prediction probability required to predict a class\n \"\"\"\n gt_class_ids = gt_class_ids[gt_class_ids != 0]\n pred_class_ids = pred_class_ids[pred_class_ids != 0]\n\n plt.figure(figsize=(12, 10))\n plt.imshow(overlaps, interpolation='nearest', cmap=plt.cm.Blues)\n plt.yticks(np.arange(len(pred_class_ids)), [\"{} ({:.2f})\".format(class_names[int(id)], pred_scores[i])\n for i, id in enumerate(pred_class_ids)])\n plt.xticks(np.arange(len(gt_class_ids)), [class_names[int(id)] for id in gt_class_ids], rotation=90)\n\n thresh = overlaps.max() / 2.\n for i, j in itertools.product(range(overlaps.shape[0]), range(overlaps.shape[1])):\n text = \"\"\n if overlaps[i, j] > threshold:\n text = \"match\" if gt_class_ids[j] == pred_class_ids[i] else \"wrong\"\n color = (\"white\" if overlaps[i, j] > thresh else \"black\" if overlaps[i, j] > 0 else \"grey\")\n plt.text(j, i, \"{:.3f}\\n{}\".format(overlaps[i, j], text), horizontalalignment=\"center\", verticalalignment=\"center\", fontsize=9, color=color)\n\n plt.tight_layout()\n plt.xlabel(\"Ground Truth\")\n plt.ylabel(\"Predictions\")\n\n\ndef draw_boxes(image, boxes=None, refined_boxes=None, masks=None, captions=None, visibilities=None, title=\"\", ax=None):\n \"\"\"Draw bounding boxes and segmentation masks with different\n customizations.\n\n boxes: [N, (y1, x1, y2, x2, class_id)] in image coordinates.\n refined_boxes: Like boxes, but draw with solid lines to show\n that they're the result of refining 'boxes'.\n masks: [N, height, width]\n captions: List of N titles to display on each box\n visibilities: (optional) List of values of 0, 1, or 2. 
Determine how\n prominent each bounding box should be.\n title: An optional title to show over the image\n ax: (optional) Matplotlib axis to draw on.\n \"\"\"\n # Number of boxes\n assert boxes is not None or refined_boxes is not None\n N = boxes.shape[0] if boxes is not None else refined_boxes.shape[0]\n\n # Matplotlib Axis\n if not ax:\n _, ax = plt.subplots(1, figsize=(12, 12))\n\n # Generate random colors\n colors = random_colors(N)\n\n # Show area outside image boundaries.\n margin = image.shape[0] // 10\n ax.set_ylim(image.shape[0] + margin, -margin)\n ax.set_xlim(-margin, image.shape[1] + margin)\n ax.axis('off')\n\n ax.set_title(title)\n\n masked_image = image.astype(np.uint32).copy()\n for i in range(N):\n # Box visibility\n visibility = visibilities[i] if visibilities is not None else 1\n if visibility == 0:\n color = \"gray\"\n style = \"dotted\"\n alpha = 0.5\n elif visibility == 1:\n color = colors[i]\n style = \"dotted\"\n alpha = 1\n elif visibility == 2:\n color = colors[i]\n style = \"solid\"\n alpha = 1\n\n # Boxes\n if boxes is not None:\n if not np.any(boxes[i]):\n # Skip this instance. Has no bbox. 
Likely lost in cropping.\n continue\n y1, x1, y2, x2 = boxes[i]\n p = patches.Rectangle((x1, y1), x2 - x1, y2 - y1, linewidth=2, alpha=alpha, linestyle=style, edgecolor=color, facecolor='none')\n ax.add_patch(p)\n\n # Refined boxes\n if refined_boxes is not None and visibility > 0:\n ry1, rx1, ry2, rx2 = refined_boxes[i].astype(np.int32)\n p = patches.Rectangle((rx1, ry1), rx2 - rx1, ry2 - ry1, linewidth=2, edgecolor=color, facecolor='none')\n ax.add_patch(p)\n # Connect the top-left corners of the anchor and proposal\n if boxes is not None:\n ax.add_line(lines.Line2D([x1, rx1], [y1, ry1], color=color))\n\n # Captions\n if captions is not None:\n caption = captions[i]\n # If there are refined boxes, display captions on them\n if refined_boxes is not None:\n y1, x1, y2, x2 = ry1, rx1, ry2, rx2\n ax.text(x1, y1, caption, size=11, verticalalignment='top',\n color='w', backgroundcolor=\"none\",\n bbox={'facecolor': color, 'alpha': 0.5, 'pad': 2, 'edgecolor': 'none'}\n )\n\n # Masks\n if masks is not None:\n mask = masks[:, :, i]\n masked_image = apply_mask(masked_image, mask, color)\n # Mask Polygon\n # Pad to ensure proper polygons for masks that touch image edges.\n padded_mask = np.zeros(\n (mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)\n padded_mask[1:-1, 1:-1] = mask\n contours = find_contours(padded_mask, 0.5)\n for verts in contours:\n # Subtract the padding and flip (y, x) to (x, y)\n verts = np.fliplr(verts) - 1\n p = Polygon(verts, facecolor=\"none\", edgecolor=color)\n ax.add_patch(p)\n ax.imshow(masked_image.astype(np.uint8))\n\n\ndef display_table(table):\n \"\"\"Display values in a table format.\n table: an iterable of rows, and each row is an iterable of values.\n \"\"\"\n html = \"\"\n for row in table:\n row_html = \"\"\n for col in row:\n row_html += \"<td>{:40}</td>\".format(str(col))\n html += \"<tr>\" + row_html + \"</tr>\"\n html = \"<table>\" + html + \"</table>\"\n IPython.display.display(IPython.display.HTML(html))\n\n\ndef 
display_weight_stats(model):\n \"\"\"Scans all the weights in the model and returns a list of tuples\n that contain stats about each weight.\n \"\"\"\n layers = model.get_trainable_layers()\n table = [[\"WEIGHT NAME\", \"SHAPE\", \"MIN\", \"MAX\", \"STD\"]]\n for l in layers:\n weight_values = l.get_weights() # list of Numpy arrays\n weight_tensors = l.weights # list of TF tensors\n for i, w in enumerate(weight_values):\n weight_name = weight_tensors[i].name\n # Detect problematic layers. Exclude biases of conv layers.\n alert = \"\"\n if w.min() == w.max() and not (l.__class__.__name__ == \"Conv2D\" and i == 1):\n alert += \"<span style='color:red'>*** dead?</span>\"\n if np.abs(w.min()) > 1000 or np.abs(w.max()) > 1000:\n alert += \"<span style='color:red'>*** Overflow?</span>\"\n # Add row\n table.append([\n weight_name + alert,\n str(w.shape),\n \"{:+9.4f}\".format(w.min()),\n \"{:+10.4f}\".format(w.max()),\n \"{:+9.4f}\".format(w.std()),\n ])\n display_table(table)\n"
] | [
[
"matplotlib.pyplot.imshow",
"numpy.concatenate",
"numpy.any",
"numpy.where",
"matplotlib.patches.Polygon",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"numpy.fliplr",
"numpy.arange",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.axis",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.random.choice",
"matplotlib.patches.Rectangle",
"numpy.random.rand",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"matplotlib.lines.Line2D",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yashyenugu/pensieve | [
"406174bef0957449cda1d9f0d7ca988e02f6bc16"
] | [
"sim/multi_agent_gs.py"
] | [
"import threading\nfrom time import gmtime, sleep, strftime, time\nimport load_trace\nfrom a3c_gs import ActorNetwork, CriticNetwork, compute_gradients\nfrom run_tests import run_tests\nimport env\nimport tensorflow as tf\nimport os\nimport logging\nimport numpy as np\nimport multiprocessing as mp\nfrom Queue import Queue\nimport pickle\nimport gc\n# from rl_test_gs import run_tests\nos.environ['CUDA_VISIBLE_DEVICES'] = ''\n\n\n# bit_rate, buffer_size, next_chunk_size, bandwidth_measurement(throughput and time), chunk_til_video_end\nS_INFO = 6\nS_LEN = 8 # take how many frames in the past\nA_DIM = 6\nACTOR_LR_RATE = 0.0001\nCRITIC_LR_RATE = 0.001\nNUM_AGENTS = 16\nTRAIN_SEQ_LEN = 100 # take as a train batch\nMODEL_SAVE_INTERVAL = 100\nVIDEO_BIT_RATE = [300, 750, 1200, 1850, 2850, 4300] # Kbps\nHD_REWARD = [1, 2, 3, 12, 15, 20]\nBUFFER_NORM_FACTOR = 10.0\nCHUNK_TIL_VIDEO_END_CAP = 48.0\nM_IN_K = 1000.0\nREBUF_PENALTY = 4.3 # 1 sec rebuffering -> 3 Mbps\nSMOOTH_PENALTY = 1\nDEFAULT_QUALITY = 1 # default video quality without agent\nRANDOM_SEED = 42\nRAND_RANGE = 1000\nSUMMARY_DIR = './results_gs'\nLOG_FILE = './results_gs/log'\nTEST_LOG_FOLDER = './test_results_gs/'\nTRAIN_TRACES = './cooked_traces/'\n# NN_MODEL = './results/pretrain_linear_reward.ckpt'\nNN_MODEL = None\nGLOBAL_WORKERS = 4\nNUM_WORKERS = 2\nEPOCHS = 100000\nENTROPY_WEIGHT = 5\n\n\nclass Worker():\n def __init__(self, global_assignment, name, all_cooked_time, all_cooked_bw, saver_thread, queue, global_actor):\n self.name = str(name)+\"_worker_\" + str(global_assignment)\n self.global_assignment = self.name[-1] # get global assignment index\n self.local_actor = ActorNetwork(state_dim=[\n S_INFO, S_LEN], action_dim=A_DIM, learning_rate=ACTOR_LR_RATE, global_workers=GLOBAL_WORKERS, scope=\"actor_\" + self.name, entropy_weight=ENTROPY_WEIGHT)\n self.local_critic = CriticNetwork(state_dim=[\n S_INFO, S_LEN], learning_rate=CRITIC_LR_RATE, global_workers=GLOBAL_WORKERS, scope=\"critic_\" + self.name)\n 
self.env = env.Environment(\n all_cooked_time=all_cooked_time, all_cooked_bw=all_cooked_bw)\n self.saver_thread = saver_thread\n\n self.global_actor = global_actor\n\n if self.saver_thread:\n self.queue = queue\n\n def train(self, sess, actor_gradient, critic_gradient):\n self.local_actor.apply_gradients(sess, actor_gradient)\n self.local_critic.apply_gradients(sess, critic_gradient)\n\n self.other_ids = list(range(GLOBAL_WORKERS))\n del self.other_ids[int(self.global_assignment)]\n\n self.actor_block_stats = self.local_actor.get_block_vars(sess)\n self.valid_actor_updates = list(np.array(self.actor_block_stats))\n\n # loop until all locks are de-activated\n while not all([v == True for v in self.valid_actor_updates]):\n self.actor_block_stats = self.local_actor.get_block_vars(sess)\n self.valid_actor_updates = list(np.array(self.actor_block_stats))\n\n feed_dict_actor = {k: v for (k, v) in zip(\n self.local_actor.feed_gradients, actor_gradient)}\n feed_dict_actor[self.local_actor.lr_placeholder] = self.local_actor.lr_rate\n feed_dict_actor[self.local_actor.entropy_weight_placeholder] = self.local_actor.entropy_weight\n\n feed_dict_critic = {k: v for (k, v) in zip(\n self.local_critic.feed_gradients, critic_gradient)}\n feed_dict_critic[self.local_critic.lr_placeholder] = self.local_critic.lr_rate\n\n for i in range(GLOBAL_WORKERS):\n # activate all locks\n sess.run(self.local_actor.block_global[int(i)])\n for i in range(len(self.other_ids)):\n sess.run(self.local_actor.apply_other_grads[int(\n i)], feed_dict=feed_dict_actor)\n sess.run(self.local_critic.apply_other_grads[int(\n i)], feed_dict=feed_dict_critic)\n for i in range(GLOBAL_WORKERS):\n # de-activate all locks\n sess.run(self.local_actor.unblock_global[int(i)])\n\n def set_entropy(self, epoch):\n if epoch == 10000:\n self.local_actor.entropy_weight = 4\n\n if epoch == 20000:\n self.local_actor.entropy_weight = 2\n if epoch == 30000:\n self.local_actor.entropy_weight = 1\n\n if epoch == 40000:\n 
self.local_actor.entropy_weight = 0.5\n if epoch == 50000:\n self.local_actor.entropy_weight = 0.3\n\n if epoch == 60000:\n self.local_actor.entropy_weight = 0.05\n\n if epoch == 70000:\n self.local_actor.entropy_weight = 0.01\n\n def work(self, sess):\n print('started worker ' + str(self.global_assignment))\n\n start = time()\n\n test_log_file_path = LOG_FILE + '_test_' + str(self.global_assignment)\n\n with sess.as_default(), sess.graph.as_default(), open(LOG_FILE + self.name, 'wb') as log_file, open(test_log_file_path, 'wb') as test_log_file:\n\n self.local_actor.transfer_global_params(sess)\n self.local_critic.transfer_global_params(sess)\n\n epoch = 0\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n s_batch = [np.zeros((S_INFO, S_LEN))]\n a_batch = [action_vec]\n r_batch = []\n entropy_record = []\n\n time_stamp = 0\n while epoch < EPOCHS: # experience video streaming forever\n\n if epoch % 1000 == 0:\n print(self.name + \" in epoch \" + str(epoch))\n\n self.set_entropy(epoch)\n\n # the action is from the last decision\n # this is to make the framework similar to the real\n delay, sleep_time, buffer_size, rebuf, \\\n video_chunk_size, next_video_chunk_sizes, \\\n end_of_video, video_chunk_remain = \\\n self.env.get_video_chunk(bit_rate)\n\n time_stamp += delay # in ms\n time_stamp += sleep_time # in ms\n\n # -- linear reward --\n # reward is video quality - rebuffer penalty - smoothness\n reward = VIDEO_BIT_RATE[bit_rate] / M_IN_K \\\n - REBUF_PENALTY * rebuf \\\n - SMOOTH_PENALTY * np.abs(VIDEO_BIT_RATE[bit_rate] -\n VIDEO_BIT_RATE[last_bit_rate]) / M_IN_K\n\n # -- log scale reward --\n # log_bit_rate = np.log(\n # VIDEO_BIT_RATE[bit_rate] / float(VIDEO_BIT_RATE[-1]))\n # log_last_bit_rate = np.log(\n # VIDEO_BIT_RATE[last_bit_rate] / float(VIDEO_BIT_RATE[-1]))\n\n # reward = log_bit_rate \\\n # - REBUF_PENALTY * rebuf \\\n # - SMOOTH_PENALTY * \\\n # np.abs(log_bit_rate - 
log_last_bit_rate)\n\n # -- HD reward --\n # reward = HD_REWARD[bit_rate] \\\n # - REBUF_PENALTY * rebuf \\\n # - SMOOTH_PENALTY * \\\n # np.abs(HD_REWARD[bit_rate] - HD_REWARD[last_bit_rate])\n\n r_batch.append(reward)\n\n last_bit_rate = bit_rate\n\n # retrieve previous state\n if len(s_batch) == 0:\n state = [np.zeros((S_INFO, S_LEN))]\n else:\n state = np.array(s_batch[-1], copy=True)\n\n # dequeue history record\n state = np.roll(state, -1, axis=1)\n\n # this should be S_INFO number of terms\n state[0, -1] = VIDEO_BIT_RATE[bit_rate] / \\\n float(np.max(VIDEO_BIT_RATE)) # last quality\n state[1, -1] = buffer_size / BUFFER_NORM_FACTOR # 10 sec\n state[2, -1] = float(video_chunk_size) / \\\n float(delay) / M_IN_K # kilo byte / ms\n state[3, -1] = float(delay) / M_IN_K / \\\n BUFFER_NORM_FACTOR # 10 sec\n state[4, :A_DIM] = np.array(\n next_video_chunk_sizes) / M_IN_K / M_IN_K # mega byte\n state[5, -1] = np.minimum(video_chunk_remain,\n CHUNK_TIL_VIDEO_END_CAP) / float(CHUNK_TIL_VIDEO_END_CAP)\n\n # compute action probability vector\n action_prob = self.local_actor.predict(\n sess, np.reshape(state, (1, S_INFO, S_LEN)))\n action_cumsum = np.cumsum(action_prob)\n bit_rate = (action_cumsum > np.random.randint(\n 1, RAND_RANGE) / float(RAND_RANGE)).argmax()\n # Note: we need to discretize the probability into 1/RAND_RANGE steps,\n # because there is an intrinsic discrepancy in passing single state and batch states\n\n # entropy_record.append(a3c.compute_entropy(action_prob[0]))\n\n # log time_stamp, bit_rate, buffer_size, reward\n log_file.write(str(time_stamp) + '\\t' +\n str(VIDEO_BIT_RATE[bit_rate]) + '\\t' +\n str(buffer_size) + '\\t' +\n str(rebuf) + '\\t' +\n str(video_chunk_size) + '\\t' +\n str(delay) + '\\t' +\n str(reward) + '\\n')\n log_file.flush()\n\n epoch += 1\n\n # report experience to the coordinator\n if len(r_batch) >= TRAIN_SEQ_LEN or end_of_video:\n\n actor_gradient, critic_gradient, td_batch = \\\n compute_gradients(\n sess=sess,\n 
s_batch=np.stack(s_batch, axis=0),\n a_batch=np.vstack(a_batch),\n r_batch=np.vstack(r_batch),\n terminal=end_of_video, actor=self.local_actor, critic=self.local_critic)\n\n self.train(sess, actor_gradient, critic_gradient)\n\n self.local_actor.update_local_params(sess)\n self.local_critic.update_local_params(sess)\n\n del s_batch[:]\n del a_batch[:]\n del r_batch[:]\n del entropy_record[:]\n\n # so that in the log we know where video ends\n log_file.write('\\n')\n\n # store the state and action into batches\n if end_of_video:\n last_bit_rate = DEFAULT_QUALITY\n bit_rate = DEFAULT_QUALITY # use the default action here\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n\n s_batch.append(np.zeros((S_INFO, S_LEN)))\n a_batch.append(action_vec)\n\n else:\n s_batch.append(state)\n\n action_vec = np.zeros(A_DIM)\n action_vec[bit_rate] = 1\n a_batch.append(action_vec)\n\n if epoch % MODEL_SAVE_INTERVAL == 0 and self.saver_thread and epoch >= 70000:\n\n save_path = SUMMARY_DIR + '/global_' + self.global_assignment + \\\n '/nn_model_' + str(epoch) + '.pickle'\n f = open(save_path, 'wb')\n params = self.global_actor.get_network_params(sess)\n pickle.dump(params, f)\n f.close()\n\n self.queue.put(\n {'epoch': epoch, 'params': params})\n # del params\n # gc.collect()\n\n if self.saver_thread:\n self.queue.put({'epoch': 'finished'})\n end = time()\n elapsed = strftime(\"%Hh%Mm%Ss\", gmtime(end - start))\n # write elapsed time for testing\n with open(LOG_FILE + '_time_training_' + str(self.name), 'w') as f:\n f.write(elapsed)\n f.close()\n\n\ndef main():\n\n os.system('rm -r ' + SUMMARY_DIR)\n os.system('mkdir ' + SUMMARY_DIR)\n os.system('rm -r ' + TEST_LOG_FOLDER)\n os.system('mkdir ' + TEST_LOG_FOLDER)\n\n global_actors = []\n global_critics = []\n testing_actors = []\n\n workers = []\n queues = []\n all_cooked_time, all_cooked_bw, _ = load_trace.load_trace(TRAIN_TRACES)\n\n for i in range(GLOBAL_WORKERS):\n queues.append(Queue())\n agent_name = 'global_'+str(i)\n 
os.system('rm -r ' + SUMMARY_DIR + '/' + agent_name)\n os.system('mkdir ' + SUMMARY_DIR + '/' + agent_name)\n\n global_actors.append(ActorNetwork(state_dim=[\n S_INFO, S_LEN], action_dim=A_DIM, learning_rate=ACTOR_LR_RATE, global_workers=None, scope='actor_'+agent_name, entropy_weight=None))\n global_critics.append(CriticNetwork(state_dim=[\n S_INFO, S_LEN], learning_rate=CRITIC_LR_RATE, global_workers=GLOBAL_WORKERS, scope='critic_'+agent_name))\n testing_actors.append(ActorNetwork(state_dim=[\n S_INFO, S_LEN], action_dim=A_DIM, learning_rate=ACTOR_LR_RATE, global_workers=None, scope='testing_actor_'+agent_name, entropy_weight=None))\n\n for j in range(GLOBAL_WORKERS):\n for i in range(NUM_WORKERS):\n workers.append(Worker(global_assignment=j, name=i, all_cooked_time=all_cooked_time,\n all_cooked_bw=all_cooked_bw, saver_thread=i == 0, queue=queues[j] if i == 0 else None, global_actor=global_actors[j])) # create workers for each global parameter set\n\n with tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n\n testing_threads = []\n for i in range(GLOBAL_WORKERS):\n def tester_work(): return run_tests(\n sess, queues[i], testing_actors[i])\n t = threading.Thread(target=(tester_work))\n t.start()\n testing_threads.append(t)\n sleep(0.1)\n\n worker_threads = []\n for worker in workers:\n def worker_work(): return worker.work(sess)\n # threading operator to run multiple workers\n t = threading.Thread(target=(worker_work))\n t.start()\n sleep(0.1)\n worker_threads.append(t)\n\n for t in worker_threads:\n t.join()\n for t in testing_threads:\n t.join()\n\n sess.close()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"numpy.minimum",
"numpy.abs",
"numpy.reshape",
"numpy.cumsum",
"numpy.stack",
"tensorflow.global_variables_initializer",
"numpy.max",
"tensorflow.Session",
"numpy.array",
"numpy.zeros",
"numpy.roll",
"numpy.vstack",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
lordtt13/Cybint-AI-projects | [
"87ad57a2e9c432483c2256408dd15762b7897b56"
] | [
"Shoe-Classification/train_.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Nov 9 22:39:22 2019\n\n@author: tanma\n\"\"\"\n\nfrom keras.callbacks import ModelCheckpoint,LearningRateScheduler,EarlyStopping\nfrom keras.optimizers import Adam\nfrom models import model\nfrom utils import train_val_generator\nfrom matplotlib import pyplot as plt\nfrom keras.models import load_model\nfrom os.path import isfile\n\n\nchoices=['vgg_16','vgg_19','resnet_152','simple']\nmodel_name = choices[2]\n\nis_transfer = True\nnum_freeze_layer = 600\nnum_classes = 4\nweights_path = 'resnet152_weights_tf.h5'\ninput_shape = (224,224,3) # Input Shape for Resnet152\n\ntrain_path = './train'\ntest_path ='./test'\n\n\n\n# model for traning\ntr_model = model(model_name,num_classes,is_transfer,\n num_freeze_layer,weights_path,input_shape)\n\n# train and test generator\ntrain_gen, val_gen = train_val_generator(32,train_path,test_path)\n\n# load last model if exists\nmodel_name = model_name+'.h5'\nif isfile(model_name):\n print('Loading previously trained weights and continue traning.....')\n tr_model = load_model(model_name)\nelse:\n print('No saved weights found.')\n\n# model saving\ncheckpoint = ModelCheckpoint(model_name+'.h5',monitor='val_acc',verbose=1,save_best_only=True)\nearly_stop = EarlyStopping(monitor='val_acc',min_delta=0,patience=10,verbose=1,mode='auto')\n\n# Compile the model\ntr_model.compile(loss='categorical_crossentropy',optimizer=Adam(1e-5),metrics=['accuracy'])\n\n# train the model\nhistory = tr_model.fit_generator(\n train_gen,\n steps_per_epoch=1400,\n epochs=30,\n validation_data = val_gen,\n validation_steps = 250,\n callbacks = [checkpoint,early_stop])\n\n# plot the results\nplt.figure()\nplt.plot(history.history['acc'])\nplt.plot(history.history['val_acc'])\nplt.title('model_accuracy')\nplt.ylabel('accuracy')\nplt.xlabel('epoch')\nplt.legend(['train','test'])\nplt.savefig(model_name+'.jpg')\n\ntr_model.save('vanilla.h5')"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wiggin66/server | [
"d32e253244be8539a087ba59fee5ab63f9f6a040"
] | [
"qa/L0_sequence_batcher/sequence_batcher_test.py"
] | [
"# Copyright (c) 2018-2020, NVIDIA CORPORATION. All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions\n# are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of NVIDIA CORPORATION nor the names of its\n# contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY\n# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nimport sys\nsys.path.append(\"../common\")\n\nfrom builtins import range\nfrom builtins import str\nfrom future.utils import iteritems\nimport os\nimport time\nimport threading\nimport traceback\nimport unittest\nimport numpy as np\nimport test_util as tu\nimport sequence_util as su\n\nTEST_SYSTEM_SHARED_MEMORY = bool(\n int(os.environ.get('TEST_SYSTEM_SHARED_MEMORY', 0)))\nTEST_CUDA_SHARED_MEMORY = bool(int(os.environ.get('TEST_CUDA_SHARED_MEMORY',\n 0)))\n\nUSE_GRPC = (os.environ.get('USE_GRPC', 1) != \"0\")\nUSE_HTTP = (os.environ.get('USE_HTTP', 1) != \"0\")\nassert USE_GRPC or USE_HTTP, \"USE_GRPC or USE_HTTP must be non-zero\"\nif USE_GRPC and USE_HTTP:\n _protocols = (\"http\", \"grpc\")\nelif USE_GRPC:\n _protocols = (\"grpc\",)\nelse:\n _protocols = (\"http\",)\n\nBACKENDS = os.environ.get('BACKENDS', \"graphdef savedmodel onnx plan custom\")\nENSEMBLES = bool(int(os.environ.get('ENSEMBLES', 1)))\n\nNO_BATCHING = (int(os.environ['NO_BATCHING']) == 1)\nMODEL_INSTANCES = int(os.environ['MODEL_INSTANCES'])\n\n_trials = ()\nif NO_BATCHING:\n for backend in BACKENDS.split(' '):\n if (backend != \"libtorch\") and (backend != 'custom'):\n _trials += (backend + \"_nobatch\",)\nelif os.environ['BATCHER_TYPE'] == \"VARIABLE\":\n for backend in BACKENDS.split(' '):\n if (backend != \"libtorch\") and (backend != 'custom') and (backend !=\n 'plan'):\n _trials += (backend,)\nelse:\n _trials = BACKENDS.split(' ')\n\n# Add ensemble to the 
_trials\nENSEMBLE_PREFIXES = [\"simple_\", \"sequence_\", \"fan_\"]\n\nif ENSEMBLES:\n res = []\n for trial in _trials:\n res.append(trial)\n if (\"custom\" in trial):\n continue\n for ensemble_prefix in ENSEMBLE_PREFIXES:\n res.append(ensemble_prefix + trial)\n _trials = tuple(res)\n\n_ragged_batch_supported_trials = list()\nif \"custom\" in _trials:\n _ragged_batch_supported_trials = (\"custom\",)\n\n# Not all models can be tested for ragged handling because the models\n# don't deal well with non-size-1 shapes\n_ragged_batch_not_supported_trials = list()\nif os.environ['BATCHER_TYPE'] == \"VARIABLE\":\n if \"custom\" in _trials:\n _ragged_batch_not_supported_trials.append(\"custom\")\n if \"plan\" in _trials:\n _ragged_batch_not_supported_trials.append(\"plan\")\n if \"onnx\" in _trials:\n _ragged_batch_not_supported_trials.append(\"onnx\")\n\n_max_sequence_idle_ms = 5000\n\n\n# Checks whether the provided model name belongs to an ensemble\n# model.\ndef is_ensemble(model_name):\n for prefix in ENSEMBLE_PREFIXES:\n if model_name.startswith(prefix):\n return True\n return False\n\n\nclass SequenceBatcherTest(su.SequenceBatcherTestUtil):\n\n def get_datatype(self, trial):\n # Get the datatype to use based on what models are available (see test.sh)\n if (\"plan\" in trial) or (\"savedmodel\" in trial):\n return np.float32\n if (\"graphdef\" in trial):\n return np.dtype(object)\n return np.int32\n\n def get_expected_result(self, expected_result, value, trial, flag_str=None):\n # Adjust the expected_result for models that\n # couldn't implement the full accumulator. 
See\n # qa/common/gen_qa_sequence_models.py for more\n # information.\n if ((not NO_BATCHING and\n (\"custom\" not in trial)) or (\"graphdef\" in trial) or\n (\"plan\" in trial) or (\"onnx\" in trial)) or (\"libtorch\" in trial):\n expected_result = value\n if (flag_str is not None) and (\"start\" in flag_str):\n expected_result += 1\n return expected_result\n\n def test_simple_sequence(self):\n # Send one sequence and check for correct accumulator\n # result. The result should be returned immediately.\n for trial in _trials:\n # Run on different protocols.\n for idx, protocol in enumerate(_protocols):\n self.clear_deferred_exceptions()\n try:\n dtype = self.get_datatype(trial)\n model_name = tu.get_sequence_model_name(trial, dtype)\n\n self.check_setup(model_name)\n self.assertFalse(\n \"TRITONSERVER_DELAY_SCHEDULER\" in os.environ)\n self.assertFalse(\n \"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\" in os.environ)\n\n self.check_sequence(\n trial,\n model_name,\n dtype,\n 5,\n (4000, None),\n # (flag_str, value, (ls_ms, gt_ms), (pre_delay, post_delay))\n ((\"start\", 1, None, None), (None, 2, None, None),\n (None, 3, None, None), (None, 4, None, None),\n (None, 5, None, None), (None, 6, None, None),\n (None, 7, None, None), (None, 8, None, None),\n (\"end\", 9, None, None)),\n self.get_expected_result(45, 9, trial, \"end\"),\n protocol,\n sequence_name=\"{}_{}\".format(self._testMethodName,\n protocol))\n\n self.check_deferred_exception()\n self.check_status(model_name, {1: 9 * (idx + 1)},\n 9 * (idx + 1), 9 * (idx + 1))\n except Exception as ex:\n self.assertTrue(False, \"unexpected error {}\".format(ex))\n\n def test_length1_sequence(self):\n # Send a length-1 sequence and check for correct accumulator\n # result. 
The result should be returned immediately.
        for trial in _trials:
            # Run on different protocols.
            for idx, protocol in enumerate(_protocols):
                self.clear_deferred_exceptions()
                try:
                    dtype = self.get_datatype(trial)
                    model_name = tu.get_sequence_model_name(trial, dtype)

                    self.check_setup(model_name)
                    # These env vars make the test scheduler delay requests;
                    # they must NOT be set for immediate-execution tests.
                    self.assertFalse(
                        "TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                    self.assertFalse(
                        "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)

                    self.check_sequence(
                        trial,
                        model_name,
                        dtype,
                        99,
                        (4000, None),
                        # (flag_str, value, (ls_ms, gt_ms), (pre_delay, post_delay))
                        (
                            ("start,end", 42, None, None),),
                        self.get_expected_result(42, 42, trial, "start,end"),
                        protocol,
                        sequence_name="{}_{}".format(self._testMethodName,
                                                     protocol))

                    self.check_deferred_exception()
                    # NOTE(review): check_status(model, {batch_size: count},
                    # exec_count, request_count) — inferred from usage across
                    # this file; confirm against the helper's definition.
                    self.check_status(model_name, {1: idx + 1}, (idx + 1),
                                      (idx + 1))
                except Exception as ex:
                    self.assertTrue(False, "unexpected error {}".format(ex))

    def test_batch_size(self):
        # Send sequence with a batch-size > 1 and check for error.

        # When 4 model instances the max-batch-size is 1 so can't test
        # since that gives a different error: "batch-size 2 exceeds
        # maximum batch size"
        if (MODEL_INSTANCES == 4) or NO_BATCHING:
            return

        for trial in _trials:
            # Run on different protocols.
            for idx, protocol in enumerate(_protocols):
                self.clear_deferred_exceptions()
                try:
                    dtype = self.get_datatype(trial)
                    model_name = tu.get_sequence_model_name(trial, dtype)

                    self.check_setup(model_name)
                    self.assertFalse(
                        "TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                    self.assertFalse(
                        "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)

                    # batch_size=2 violates the sequence batcher's
                    # batch-size-1 requirement, so this must raise.
                    self.check_sequence(
                        trial,
                        model_name,
                        dtype,
                        27,
                        (4000, None),
                        # (flag_str, value, (ls_ms, gt_ms), (pre_delay, post_delay))
                        (("start", 1, None, None), ("end", 9, None, None)),
                        self.get_expected_result(10, 9, trial, "end"),
                        protocol,
                        batch_size=2,
                        sequence_name="{}_{}".format(self._testMethodName,
                                                     protocol))

                    self.check_deferred_exception()
                    self.assertTrue(False, "expected error")
                except Exception as ex:
                    # Ensemble models wrap the error message; note the
                    # 'return' ends the whole test after the first
                    # ensemble trial is verified.
                    for prefix in ENSEMBLE_PREFIXES:
                        if model_name.startswith(prefix):
                            base_model_name = model_name[(len(prefix)):]
                            self.assertTrue(ex.message().startswith(
                                str("in ensemble '{}', " +
                                    "inference request to model '{}' must specify "
                                    +
                                    "batch-size 1 due to requirements of sequence "
                                    + "batcher").format(model_name,
                                                        base_model_name)))
                            return
                    self.assertTrue(ex.message().startswith(
                        str("inference request to model '{}' must specify " +
                            "batch-size 1 due to requirements of sequence " +
                            "batcher").format(model_name)))

    def test_no_correlation_id(self):
        # Send sequence without correlation ID and check for error.
        for trial in _trials:
            # Run on different protocols.
            for idx, protocol in enumerate(_protocols):
                self.clear_deferred_exceptions()
                try:
                    dtype = self.get_datatype(trial)
                    model_name = tu.get_sequence_model_name(trial, dtype)

                    self.check_setup(model_name)
                    self.assertFalse(
                        "TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                    self.assertFalse(
                        "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)

                    self.check_sequence(
                        trial,
                        model_name,
                        dtype,
                        0,  # correlation_id = 0
                        (4000, None),
                        # (flag_str, value, (ls_ms, gt_ms), (pre_delay, post_delay))
                        (("start", 1, None, None), ("end", 9, None, None)),
                        self.get_expected_result(10, 9, trial, "end"),
                        protocol,
                        sequence_name="{}_{}".format(self._testMethodName,
                                                     protocol))

                    self.check_deferred_exception()
                    self.assertTrue(False, "expected error")
                except Exception as ex:
                    # 'return' intentionally ends the test once the
                    # ensemble-wrapped message is verified.
                    for prefix in ENSEMBLE_PREFIXES:
                        if model_name.startswith(prefix):
                            base_model_name = model_name[(len(prefix)):]
                            self.assertTrue(ex.message().startswith(
                                str("in ensemble '{}', " +
                                    "inference request to model '{}' must specify a "
                                    + "non-zero correlation ID").format(
                                        model_name, base_model_name)))
                            return
                    self.assertTrue(ex.message().startswith(
                        str("inference request to model '{}' must specify a " +
                            "non-zero correlation ID").format(model_name)))

    def test_no_sequence_start(self):
        # Send sequence without start flag for never before seen
        # correlation ID. Expect failure.
        for trial in _trials:
            # Run on different protocols.
            for idx, protocol in enumerate(_protocols):
                self.clear_deferred_exceptions()
                try:
                    dtype = self.get_datatype(trial)
                    model_name = tu.get_sequence_model_name(trial, dtype)

                    self.check_setup(model_name)
                    self.assertFalse(
                        "TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                    self.assertFalse(
                        "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)

                    # First two requests carry no flags at all, so the
                    # scheduler has never seen sequence 37469245 start.
                    self.check_sequence(
                        trial,
                        model_name,
                        dtype,
                        37469245,
                        (4000, None),
                        # (flag_str, value, (ls_ms, gt_ms), (pre_delay, post_delay))
                        ((None, 1, None, None), (None, 2, None, None),
                         ("end", 3, None, None)),
                        self.get_expected_result(6, 3, trial, "end"),
                        protocol,
                        sequence_name="{}_{}".format(self._testMethodName,
                                                     protocol))

                    self.check_deferred_exception()
                    self.assertTrue(False, "expected error")
                except Exception as ex:
                    print(model_name + "-> " + ex.message())
                    for prefix in ENSEMBLE_PREFIXES:
                        if model_name.startswith(prefix):
                            base_model_name = model_name[(len(prefix)):]
                            self.assertTrue(ex.message().startswith(
                                str("in ensemble '{}', " +
                                    "inference request for sequence 37469245 to "
                                    +
                                    "model '{}' must specify the START flag on the first "
                                    + "request of the sequence").format(
                                        model_name, base_model_name)))
                            return
                    self.assertTrue(ex.message().startswith(
                        str("inference request for sequence 37469245 to " +
                            "model '{}' must specify the START flag on the first "
                            + "request of the sequence").format(model_name)))

    def test_no_sequence_start2(self):
        # Send sequence without start flag after sending a valid
        # sequence with the same correlation ID. Expect failure for
        # the second sequence.
        for trial in _trials:
            # Run on different protocols.
            for idx, protocol in enumerate(_protocols):
                self.clear_deferred_exceptions()
                try:
                    dtype = self.get_datatype(trial)
                    model_name = tu.get_sequence_model_name(trial, dtype)

                    self.check_setup(model_name)
                    self.assertFalse(
                        "TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                    self.assertFalse(
                        "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)

                    # The trailing (None, 55, ...) request reuses
                    # correlation ID 3 after "end" without a new "start".
                    self.check_sequence(
                        trial,
                        model_name,
                        dtype,
                        3,
                        (4000, None),
                        # (flag_str, value, (ls_ms, gt_ms), (pre_delay, post_delay))
                        (("start", 1, None, None), (None, 2, None, None),
                         ("end", 3, None, None), (None, 55, None, None)),
                        self.get_expected_result(6, 3, trial, None),
                        protocol,
                        sequence_name="{}_{}".format(self._testMethodName,
                                                     protocol))

                    self.check_status(model_name, {1: 3 * (idx + 1)},
                                      3 * (idx + 1), 3 * (idx + 1))
                    self.check_deferred_exception()
                    self.assertTrue(False, "expected error")
                except Exception as ex:
                    for prefix in ENSEMBLE_PREFIXES:
                        if model_name.startswith(prefix):
                            base_model_name = model_name[(len(prefix)):]
                            self.assertTrue(ex.message().startswith(
                                str("in ensemble '{}', " +
                                    "inference request for sequence 3 to model '{}' must "
                                    +
                                    "specify the START flag on the first request of "
                                    + "the sequence").format(
                                        model_name, base_model_name)))
                            return
                    self.assertTrue(ex.message().startswith(
                        str("inference request for sequence 3 to model '{}' must "
                            +
                            "specify the START flag on the first request of " +
                            "the sequence").format(model_name)))

    def test_no_sequence_end(self):
        # Send sequence without end flag. Use same correlation ID to
        # send another sequence. The first sequence will be ended
        # automatically but the second should complete successfully.
        for trial in _trials:
            # Run on different protocols.
            for idx, protocol in enumerate(_protocols):
                self.clear_deferred_exceptions()
                try:
                    dtype = self.get_datatype(trial)
                    model_name = tu.get_sequence_model_name(trial, dtype)

                    self.check_setup(model_name)
                    self.assertFalse(
                        "TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                    self.assertFalse(
                        "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)

                    # Second "start" (value 42) implicitly terminates the
                    # first sequence; expected result 51 = 42 + 9 covers
                    # only the second sequence.
                    self.check_sequence(
                        trial,
                        model_name,
                        dtype,
                        4566,
                        (4000, None),
                        # (flag_str, value, (ls_ms, gt_ms), (pre_delay, post_delay))
                        (("start", 1, None, None), (None, 2, None, None),
                         ("start", 42, None, None), ("end", 9, None, None)),
                        self.get_expected_result(51, 9, trial, "end"),
                        protocol,
                        sequence_name="{}_{}".format(self._testMethodName,
                                                     protocol))

                    self.check_deferred_exception()
                    self.check_status(model_name, {1: 4 * (idx + 1)},
                                      4 * (idx + 1), 4 * (idx + 1))
                except Exception as ex:
                    self.assertTrue(False, "unexpected error {}".format(ex))

    def test_half_batch(self):
        # Test model instances that together are configured with
        # total-batch-size 4. Send two equal-length sequences in
        # parallel and make sure they get completely batched into
        # batch-size 2 inferences.
        for trial in _trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            precreated_shm0_handles = self.precreate_register_regions(
                (1, 2, 3, 4), dtype, 0)
            precreated_shm1_handles = self.precreate_register_regions(
                (0, 9, 5, 13), dtype, 1)
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)
                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 8)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 0)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            987,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), (None, 2, None),
                             (None, 3, None), ("end", 4, None)),
                            self.get_expected_result(10, 4, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            988,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 0, None), (None, 9, None),
                             (None, 5, None), ("end", 13, None)),
                            self.get_expected_result(27, 13, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))

                for t in threads:
                    t.start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 8}, 8, 8)
                else:
                    # With 1 instance both sequences share one batch
                    # (batch-size 2); with more instances each sequence
                    # lands on its own instance (batch-size 1).
                    stats_batch_size = 2 if MODEL_INSTANCES == 1 else 1
                    exec_cnt = 4 if MODEL_INSTANCES == 1 else 8
                    self.check_status(
                        model_name,
                        {stats_batch_size: 4 * min(2, MODEL_INSTANCES)},
                        exec_cnt, 8)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)

    def test_skip_batch(self):
        # Test model instances together are configured with
        # total-batch-size 4. Send four sequences in parallel where
        # two sequences have shorter length so that padding must be
        # applied correctly for the longer sequences.
        for trial in _trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            precreated_shm0_handles = self.precreate_register_regions((1, 3),
                                                                      dtype, 0)
            precreated_shm1_handles = self.precreate_register_regions(
                (11, 12, 13, 14), dtype, 1)
            precreated_shm2_handles = self.precreate_register_regions(
                (111, 113), dtype, 2)
            precreated_shm3_handles = self.precreate_register_regions(
                (1111, 1112, 1113, 1114), dtype, 3)
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)

                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 12)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 0)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1001,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), ("end", 3, None)),
                            self.get_expected_result(4, 3, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1002,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11, None), (None, 12, None),
                             (None, 13, None), ("end", 14, None)),
                            self.get_expected_result(50, 14, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1003,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 111, None), ("end", 113, None)),
                            self.get_expected_result(224, 113, trial, "end"),
                            precreated_shm2_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1004,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1111, None), (None, 1112, None),
                             (None, 1113, None), ("end", 1114, None)),
                            self.get_expected_result(4450, 1114, trial, "end"),
                            precreated_shm3_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))

                # Start the longer sequences (threads 1 and 3) first so
                # they claim slots before the short ones arrive.
                threads[1].start()
                threads[3].start()
                time.sleep(3)
                threads[0].start()
                threads[2].start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 12}, 12, 12)
                else:
                    # Batch size is 4 for the first two inferences and
                    # then 2 for the second two inferences. This is
                    # because we request the longer sequences first
                    # (threads 1 and 3) in slots 0 and 1 and so after
                    # shorter sequences are complete there are only slots
                    # 0 and 1 to execute.
                    if MODEL_INSTANCES == 1:
                        self.check_status(model_name, {2: 2, 4: 2}, 4, 12)
                    elif MODEL_INSTANCES == 2:
                        self.check_status(model_name, {2: 4, 1: 4}, 8, 12)
                    elif MODEL_INSTANCES == 4:
                        self.check_status(model_name, {1: 12}, 12, 12)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)
                    self.cleanup_shm_regions(precreated_shm2_handles)
                    self.cleanup_shm_regions(precreated_shm3_handles)

    def test_full_batch(self):
        # Test model instances together are configured with
        # total-batch-size 4. Send four equal-length sequences in
        # parallel and make sure they get completely batched into
        # batch-size 4 inferences.
        for trial in _trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            precreated_shm0_handles = self.precreate_register_regions((1, 2, 3),
                                                                      dtype, 0)
            precreated_shm1_handles = self.precreate_register_regions(
                (11, 12, 13), dtype, 1)
            precreated_shm2_handles = self.precreate_register_regions(
                (111, 112, 113), dtype, 2)
            precreated_shm3_handles = self.precreate_register_regions(
                (1111, 1112, 1113), dtype, 3)
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)

                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 12)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 0)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1001,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), (None, 2, None), ("end", 3,
                                                                   None)),
                            self.get_expected_result(6, 3, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1002,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11, None), (None, 12, None), ("end", 13,
                                                                     None)),
                            self.get_expected_result(36, 13, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1003,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 111, None), (None, 112, None),
                             ("end", 113, None)),
                            self.get_expected_result(336, 113, trial, "end"),
                            precreated_shm2_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1004,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1111, None), (None, 1112, None),
                             ("end", 1113, None)),
                            self.get_expected_result(3336, 1113, trial, "end"),
                            precreated_shm3_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))

                for t in threads:
                    t.start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 12}, 12, 12)
                else:
                    # NOTE(review): 4 / MODEL_INSTANCES is float division,
                    # so the stats key is 4.0/2.0/1.0 — presumably
                    # check_status tolerates float keys; confirm.
                    self.check_status(model_name, {
                        (4 / MODEL_INSTANCES): (3 * MODEL_INSTANCES)
                    }, 3 * MODEL_INSTANCES, 12)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)
                    self.cleanup_shm_regions(precreated_shm2_handles)
                    self.cleanup_shm_regions(precreated_shm3_handles)

    def test_ragged_batch(self):
        # Test model instances that together are configured with
        # total-batch-size 4. The sequences use the different size
        # inputs and the inputs are *not* marked as allowing ragged
        # batch. Send four equal-length sequences in parallel and
        # make sure they don't get batched.

        # Only works with 1 model instance since want to test all
        # sequences batching together.
        if MODEL_INSTANCES != 1:
            return

        for trial in _ragged_batch_not_supported_trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            # Per-sequence tensor shapes deliberately differ: (2,), (2,),
            # (1,), (3,).
            precreated_shm0_handles = self.precreate_register_regions(
                (1, 2, 3), dtype, 0, tensor_shape=(2,))
            precreated_shm1_handles = self.precreate_register_regions(
                (11, 12, 13), dtype, 1, tensor_shape=(2,))
            precreated_shm2_handles = self.precreate_register_regions(
                (111, 112, 113), dtype, 2, tensor_shape=(1,))
            precreated_shm3_handles = self.precreate_register_regions(
                (1111, 1112, 1113), dtype, 3, tensor_shape=(3,))
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)

                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 12)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 0)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1001,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), (None, 2, None), ("end", 3,
                                                                   None)),
                            self.get_expected_result(6 * 2, 3, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (2,)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1002,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11, None), (None, 12, None), ("end", 13,
                                                                     None)),
                            self.get_expected_result(36 * 2, 13, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (2,)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1003,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 111, None), (None, 112, None),
                             ("end", 113, None)),
                            self.get_expected_result(336, 113, trial, "end"),
                            precreated_shm2_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (1,)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1004,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1111, None), (None, 1112, None),
                             ("end", 1113, None)),
                            self.get_expected_result(3336 * 3, 1113, trial,
                                                     "end"),
                            precreated_shm3_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (3,)
                        }))

                # Start the (3,)-shaped sequence last, after the others
                # are queued.
                threads[0].start()
                threads[1].start()
                threads[2].start()
                time.sleep(3)
                threads[3].start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 12}, 12, 12)
                else:
                    self.check_status(model_name, {4: 9}, 9, 12)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)
                    self.cleanup_shm_regions(precreated_shm2_handles)
                    self.cleanup_shm_regions(precreated_shm3_handles)

    def test_ragged_batch_allowed(self):
        # Test model instances that together are configured with
        # total-batch-size 4. The sequences use the different size
        # inputs. Send four equal-length sequences in parallel and
        # make sure they get batched appropriately even with size
        # differences.

        # Only works with 1 model instance since want to test all
        # sequences batching together.
        if MODEL_INSTANCES != 1:
            return

        for trial in _ragged_batch_supported_trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            precreated_shm0_handles = self.precreate_register_regions(
                (1, 2, 3), dtype, 0, tensor_shape=(2,))
            precreated_shm1_handles = self.precreate_register_regions(
                (11, 12, 13), dtype, 1, tensor_shape=(2,))
            precreated_shm2_handles = self.precreate_register_regions(
                (111, 112, 113), dtype, 2, tensor_shape=(1,))
            precreated_shm3_handles = self.precreate_register_regions(
                (1111, 1112, 1113), dtype, 3, tensor_shape=(3,))
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)

                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 12)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 0)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1001,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), (None, 2, None), ("end", 3,
                                                                   None)),
                            self.get_expected_result(6 * 2, 3, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (2,)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1002,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11, None), (None, 12, None), ("end", 13,
                                                                     None)),
                            self.get_expected_result(36 * 2, 13, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (2,)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1003,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 111, None), (None, 112, None),
                             ("end", 113, None)),
                            self.get_expected_result(336, 113, trial, "end"),
                            precreated_shm2_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (1,)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1004,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1111, None), (None, 1112, None),
                             ("end", 1113, None)),
                            self.get_expected_result(3336 * 3, 1113, trial,
                                                     "end"),
                            precreated_shm3_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName),
                            'tensor_shape': (3,)
                        }))

                for t in threads:
                    t.start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 12}, 12, 12)
                else:
                    self.check_status(model_name, {4: 3}, 3, 12)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)
                    self.cleanup_shm_regions(precreated_shm2_handles)
                    self.cleanup_shm_regions(precreated_shm3_handles)

    def test_backlog(self):
        # Test model instances together are configured with
        # total-max-batch-size 4. Send 5 equal-length sequences in
        # parallel and make sure they get completely batched into
        # batch-size 4 inferences plus the 5th should go in the
        # backlog and then get handled once there is a free slot.
        for trial in _trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            precreated_shm0_handles = self.precreate_register_regions((1, 2, 3),
                                                                      dtype, 0)
            precreated_shm1_handles = self.precreate_register_regions(
                (11, 12, 13), dtype, 1)
            precreated_shm2_handles = self.precreate_register_regions(
                (111, 112, 113), dtype, 2)
            precreated_shm3_handles = self.precreate_register_regions(
                (1111, 1112, 1113), dtype, 3)
            precreated_shm4_handles = self.precreate_register_regions(
                (11111, 11112, 11113), dtype, 4)
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)

                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 12)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 0)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1001,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), (None, 2, None), ("end", 3,
                                                                   None)),
                            self.get_expected_result(6, 3, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1002,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11, None), (None, 12, None), ("end", 13,
                                                                     None)),
                            self.get_expected_result(36, 13, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1003,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 111, None), (None, 112, None),
                             ("end", 113, None)),
                            self.get_expected_result(336, 113, trial, "end"),
                            precreated_shm2_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1004,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1111, None), (None, 1112, None),
                             ("end", 1113, None)),
                            self.get_expected_result(3336, 1113, trial, "end"),
                            precreated_shm3_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1005,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11111, None), (None, 11112, None),
                             ("end", 11113, None)),
                            self.get_expected_result(33336, 11113, trial,
                                                     "end"),
                            precreated_shm4_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))

                for t in threads:
                    t.start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 15}, 15, 15)
                else:
                    # The 5th (backlog) sequence runs alone after a slot
                    # frees, so expected batch-size histograms vary with
                    # instance count.
                    if MODEL_INSTANCES == 1:
                        self.check_status(model_name, {4: 3, 1: 3}, 6, 15)
                    elif MODEL_INSTANCES == 2:
                        self.check_status(model_name, {2: 6, 1: 3}, 9, 15)
                    else:
                        self.check_status(model_name, {1: 15}, 15, 15)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)
                    self.cleanup_shm_regions(precreated_shm2_handles)
                    self.cleanup_shm_regions(precreated_shm3_handles)
                    self.cleanup_shm_regions(precreated_shm4_handles)

    def test_backlog_fill(self):
        # Test model instances together are configured with
        # total-max-batch-size 4. Send 4 sequences in parallel, two of
        # which are shorter. Send 2 additional sequences that should
        # go into backlog but should immediately fill into the short
        # sequences.

        # Only works with 1 model instance since otherwise an instance
        # can run ahead and handle more work than expected (leads to
        # intermittent failures)
        if MODEL_INSTANCES != 1:
            return

        for trial in _trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            precreated_shm0_handles = self.precreate_register_regions((1, 2, 3),
                                                                      dtype, 0)
            precreated_shm1_handles = self.precreate_register_regions((11, 13),
                                                                      dtype, 1)
            precreated_shm2_handles = self.precreate_register_regions(
                (111, 113), dtype, 2)
            precreated_shm3_handles = self.precreate_register_regions(
                (1111, 1112, 1113), dtype, 3)
            precreated_shm4_handles = self.precreate_register_regions((11111,),
                                                                      dtype, 4)
            precreated_shm5_handles = self.precreate_register_regions((22222,),
                                                                      dtype, 5)
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)

                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 10)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 2)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1001,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), (None, 2, None), ("end", 3,
                                                                   None)),
                            self.get_expected_result(6, 3, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1002,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11, None), ("end", 13, None)),
                            self.get_expected_result(24, 13, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1003,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 111, None), ("end", 113, None)),
                            self.get_expected_result(224, 113, trial, "end"),
                            precreated_shm2_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1004,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1111, None), (None, 1112, None),
                             ("end", 1113, None)),
                            self.get_expected_result(3336, 1113, trial, "end"),
                            precreated_shm3_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1005,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (
                                ("start,end", 11111, None),),
                            self.get_expected_result(11111, 11111, trial,
                                                     "start,end"),
                            precreated_shm4_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1006,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (
                                ("start,end", 22222, None),),
                            self.get_expected_result(22222, 22222, trial,
                                                     "start,end"),
                            precreated_shm5_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))

                # Queue the four slot-filling sequences first, then the
                # two single-request backlog sequences.
                threads[0].start()
                threads[1].start()
                threads[2].start()
                threads[3].start()
                time.sleep(3)
                threads[4].start()
                threads[5].start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 12}, 12, 12)
                else:
                    self.check_status(model_name, {4: 3}, 3, 12)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)
                    self.cleanup_shm_regions(precreated_shm2_handles)
                    self.cleanup_shm_regions(precreated_shm3_handles)
                    self.cleanup_shm_regions(precreated_shm4_handles)
                    self.cleanup_shm_regions(precreated_shm5_handles)

    def test_backlog_fill_no_end(self):
        # Test model instances together are configured with
        # total-max-batch-size 4. Send 4 sequences in parallel, two of
        # which are shorter. Send 2 additional sequences that should
        # go into backlog but should immediately fill into the short
        # sequences. One of those sequences is filled before it gets
        # its end request.

        # Only works with 1 model instance since otherwise an instance
        # can run ahead and handle more work than expected (leads to
        # intermittent failures)
        if MODEL_INSTANCES != 1:
            return

        for trial in _trials:
            self.clear_deferred_exceptions()
            dtype = self.get_datatype(trial)
            precreated_shm0_handles = self.precreate_register_regions((1, 2, 3),
                                                                      dtype, 0)
            precreated_shm1_handles = self.precreate_register_regions((11, 13),
                                                                      dtype, 1)
            precreated_shm2_handles = self.precreate_register_regions(
                (111, 113), dtype, 2)
            precreated_shm3_handles = self.precreate_register_regions(
                (1111, 1112, 1113), dtype, 3)
            precreated_shm4_handles = self.precreate_register_regions((11111,),
                                                                      dtype, 4)
            precreated_shm5_handles = self.precreate_register_regions(
                (22222, 22223, 22224), dtype, 5)
            try:
                model_name = tu.get_sequence_model_name(trial, dtype)

                self.check_setup(model_name)

                # Need scheduler to wait for queue to contain all
                # inferences for both sequences.
                self.assertTrue("TRITONSERVER_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_DELAY_SCHEDULER"]), 10)
                self.assertTrue(
                    "TRITONSERVER_BACKLOG_DELAY_SCHEDULER" in os.environ)
                self.assertEqual(
                    int(os.environ["TRITONSERVER_BACKLOG_DELAY_SCHEDULER"]), 3)

                threads = []
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1001,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1, None), (None, 2, None), ("end", 3,
                                                                   None)),
                            self.get_expected_result(6, 3, trial, "end"),
                            precreated_shm0_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1002,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 11, None), ("end", 13, None)),
                            self.get_expected_result(24, 13, trial, "end"),
                            precreated_shm1_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1003,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 111, None), ("end", 113, None)),
                            self.get_expected_result(224, 113, trial, "end"),
                            precreated_shm2_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1004,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (("start", 1111, None), (None, 1112, None),
                             ("end", 1113, None)),
                            self.get_expected_result(3336, 1113, trial, "end"),
                            precreated_shm3_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1005,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            (
                                ("start,end", 11111, None),),
                            self.get_expected_result(11111, 11111, trial,
                                                     "start,end"),
                            precreated_shm4_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))
                threads.append(
                    threading.Thread(
                        target=self.check_sequence_async,
                        args=(
                            trial,
                            model_name,
                            dtype,
                            1006,
                            (None, None),
                            # (flag_str, value, pre_delay_ms)
                            # The 2000ms pre-delay on the "end" request is
                            # what makes sequence 1006 get filled into a
                            # slot before its end arrives.
                            (
                                ("start", 22222, None),
                                (None, 22223, None),
                                ("end", 22224, 2000),
                            ),
                            self.get_expected_result(66669, 22224, trial,
                                                     "end"),
                            precreated_shm5_handles),
                        kwargs={
                            'sequence_name': "{}".format(self._testMethodName)
                        }))

                # Staggered starts: each sequence is queued 2s after the
                # previous one.
                threads[0].start()
                time.sleep(2)
                threads[1].start()
                time.sleep(2)
                threads[2].start()
                time.sleep(2)
                threads[3].start()
                time.sleep(2)
                threads[4].start()
                time.sleep(2)
                threads[5].start()
                for t in threads:
                    t.join()
                self.check_deferred_exception()
                if is_ensemble(model_name):
                    # Requests do not get batched for the ensemble model
                    self.check_status(model_name, {1: 14}, 14, 14)
                else:
                    # Expecting 3 batch-size 4 inferences and then the
                    # 1006 sequence will follow 1003 (a different
                    # implementation could also follow 1002...)
                    self.check_status(model_name, {4: 3, 3: 2}, 5, 14)
            except Exception as ex:
                self.assertTrue(False, "unexpected error {}".format(ex))
            finally:
                if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:
                    self.cleanup_shm_regions(precreated_shm0_handles)
                    self.cleanup_shm_regions(precreated_shm1_handles)
                    self.cleanup_shm_regions(precreated_shm2_handles)
                    self.cleanup_shm_regions(precreated_shm3_handles)
                    self.cleanup_shm_regions(precreated_shm4_handles)
                    self.cleanup_shm_regions(precreated_shm5_handles)

    def test_backlog_same_correlation_id(self):
        # Test model instances together are configured with
        # total-max-batch-size 4. Send 4 equal-length sequences in
        # parallel and make sure they get completely batched into
        # batch-size 4 inferences. 
Send a 5th with the same\n # correlation ID as one of the first four.\n for trial in _trials:\n self.clear_deferred_exceptions()\n dtype = self.get_datatype(trial)\n precreated_shm0_handles = self.precreate_register_regions((1, 2, 3),\n dtype, 0)\n precreated_shm1_handles = self.precreate_register_regions(\n (11, 12, 13), dtype, 1)\n precreated_shm2_handles = self.precreate_register_regions(\n (111, 112, 113), dtype, 2)\n precreated_shm3_handles = self.precreate_register_regions(\n (1111, 1112, 1113), dtype, 3)\n precreated_shm4_handles = self.precreate_register_regions(\n (11111, 11113), dtype, 4)\n try:\n model_name = tu.get_sequence_model_name(trial, dtype)\n\n self.check_setup(model_name)\n\n # Need scheduler to wait for queue to contain all\n # inferences for both sequences.\n self.assertTrue(\"TRITONSERVER_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_DELAY_SCHEDULER\"]), 12)\n self.assertTrue(\n \"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\"]), 2)\n\n threads = []\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1001,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 1, None), (None, 2, None), (\"end\", 3,\n None)),\n self.get_expected_result(6, 3, trial, \"end\"),\n precreated_shm0_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1002,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 11, None), (None, 12, None), (\"end\", 13,\n None)),\n self.get_expected_result(36, 13, trial, \"end\"),\n precreated_shm1_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n 
model_name,\n dtype,\n 1003,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 111, None), (None, 112, None),\n (\"end\", 113, None)),\n self.get_expected_result(336, 113, trial, \"end\"),\n precreated_shm2_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1004,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 1111, None), (None, 1112, None),\n (\"end\", 1113, None)),\n self.get_expected_result(3336, 1113, trial, \"end\"),\n precreated_shm3_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1002,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 11111, None), (\"end\", 11113, None)),\n self.get_expected_result(22224, 11113, trial,\n \"end\"),\n precreated_shm4_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n\n threads[0].start()\n threads[1].start()\n threads[2].start()\n threads[3].start()\n time.sleep(3)\n threads[4].start()\n for t in threads:\n t.join()\n self.check_deferred_exception()\n if is_ensemble(model_name):\n # Requests do not get batched for the ensemble model\n self.check_status(model_name, {1: 14}, 14, 14)\n else:\n if MODEL_INSTANCES != 4:\n batch_exec = {\n (4 / MODEL_INSTANCES): (3 * MODEL_INSTANCES),\n 1: 2\n }\n else:\n batch_exec = {1: (3 * MODEL_INSTANCES) + 2}\n self.check_status(model_name, batch_exec,\n (3 * MODEL_INSTANCES) + 2, 14)\n except Exception as ex:\n self.assertTrue(False, \"unexpected error {}\".format(ex))\n finally:\n if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:\n self.cleanup_shm_regions(precreated_shm0_handles)\n self.cleanup_shm_regions(precreated_shm1_handles)\n self.cleanup_shm_regions(precreated_shm2_handles)\n 
self.cleanup_shm_regions(precreated_shm3_handles)\n self.cleanup_shm_regions(precreated_shm4_handles)\n\n def test_backlog_same_correlation_id_no_end(self):\n # Test model instances together are configured with\n # total-max-batch-size 4. Send 4 sequences in parallel and\n # make sure they get completely batched into batch-size 4\n # inferences. One of the sequences is shorter and does not\n # have an end marker but has same correlation ID as the 5th\n # sequence. We expect that short sequence to get ended early\n # (because of the same correlation ID) and make room for the\n # 5th sequence.\n\n # Only works with 1 model instance since otherwise an instance\n # can run ahead and handle more work than expected (leads to\n # intermittent failures)\n if MODEL_INSTANCES != 1:\n return\n\n for trial in _trials:\n self.clear_deferred_exceptions()\n dtype = self.get_datatype(trial)\n precreated_shm0_handles = self.precreate_register_regions((1, 3),\n dtype, 0)\n precreated_shm1_handles = self.precreate_register_regions(\n (11, 12, 12, 13), dtype, 1)\n precreated_shm2_handles = self.precreate_register_regions(\n (111, 112, 112, 113), dtype, 2)\n precreated_shm3_handles = self.precreate_register_regions(\n (1111, 1112, 1112, 1113), dtype, 3)\n precreated_shm4_handles = self.precreate_register_regions(\n (11111, 11113), dtype, 4)\n try:\n model_name = tu.get_sequence_model_name(trial, dtype)\n\n self.check_setup(model_name)\n\n # Need scheduler to wait for queue to contain all\n # inferences for both sequences.\n self.assertTrue(\"TRITONSERVER_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_DELAY_SCHEDULER\"]), 16)\n self.assertTrue(\n \"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\"]), 0)\n\n threads = []\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1001,\n (None, None),\n # 
(flag_str, value, pre_delay_ms)\n ((\"start\", 1, None), (None, 3, None)),\n self.get_expected_result(4, 3, trial, None),\n precreated_shm0_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1002,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 11, None), (None, 12, None),\n (None, 12, None), (\"end\", 13, None)),\n self.get_expected_result(48, 13, trial, \"end\"),\n precreated_shm1_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1003,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 111, None), (None, 112, None),\n (None, 112, None), (\"end\", 113, None)),\n self.get_expected_result(448, 113, trial, \"end\"),\n precreated_shm2_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1004,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 1111, None), (None, 1112, None),\n (None, 1112, None), (\"end\", 1113, None)),\n self.get_expected_result(4448, 1113, trial, \"end\"),\n precreated_shm3_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1001,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 11111, None), (\"end\", 11113, None)),\n self.get_expected_result(22224, 11113, trial,\n \"end\"),\n precreated_shm4_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n\n threads[0].start()\n threads[1].start()\n threads[2].start()\n threads[3].start()\n time.sleep(2)\n threads[4].start()\n for t 
in threads:\n t.join()\n self.check_deferred_exception()\n if is_ensemble(model_name):\n # Requests do not get batched for the ensemble model\n self.check_status(model_name, {1: 16}, 16, 16)\n else:\n self.check_status(model_name, {4: 4}, 4, 16)\n except Exception as ex:\n self.assertTrue(False, \"unexpected error {}\".format(ex))\n finally:\n if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:\n self.cleanup_shm_regions(precreated_shm0_handles)\n self.cleanup_shm_regions(precreated_shm1_handles)\n self.cleanup_shm_regions(precreated_shm2_handles)\n self.cleanup_shm_regions(precreated_shm3_handles)\n self.cleanup_shm_regions(precreated_shm4_handles)\n\n def test_backlog_sequence_timeout(self):\n # Test model instances together are configured with\n # total-max-batch-size 4. Send 4 sequences in parallel and\n # make sure they get completely batched into batch-size 4\n # inferences. One of the sequences has a long delay that\n # causes it to timeout and that allows a 5th sequence to come\n # out of the backlog and finish. 
The timed-out sequence will\n # then send the delayed inference but it will appear as a new\n # sequence and so fail because it doesn't have the START flag.\n\n # Only works with 1 model instance since otherwise an instance\n # can run ahead and handle more work than expected (leads to\n # intermittent failures)\n if MODEL_INSTANCES != 1:\n return\n\n for trial in _trials:\n self.clear_deferred_exceptions()\n dtype = self.get_datatype(trial)\n precreated_shm0_handles = self.precreate_register_regions((1, 3),\n dtype, 0)\n precreated_shm1_handles = self.precreate_register_regions(\n (11, 12, 12, 13), dtype, 1)\n precreated_shm2_handles = self.precreate_register_regions(\n (111, 112, 112, 113), dtype, 2)\n precreated_shm3_handles = self.precreate_register_regions(\n (1111, 1112, 1112, 1113), dtype, 3)\n precreated_shm4_handles = self.precreate_register_regions(\n (11111, 11113), dtype, 4)\n try:\n model_name = tu.get_sequence_model_name(trial, dtype)\n\n self.check_setup(model_name)\n\n # Need scheduler to wait for queue to contain all\n # inferences for all sequences.\n self.assertTrue(\"TRITONSERVER_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_DELAY_SCHEDULER\"]), 4)\n self.assertTrue(\n \"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\"]), 0)\n\n threads = []\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1001,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 1, None),\n (None, 3, _max_sequence_idle_ms + 1000)),\n self.get_expected_result(4, 3, trial, None),\n precreated_shm0_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1002,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 11, None), 
(None, 12,\n _max_sequence_idle_ms / 2),\n (None, 12, _max_sequence_idle_ms / 2),\n (\"end\", 13, _max_sequence_idle_ms / 2)),\n self.get_expected_result(48, 13, trial, None),\n precreated_shm1_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1003,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 111, None), (None, 112,\n _max_sequence_idle_ms / 2),\n (None, 112, _max_sequence_idle_ms / 2),\n (\"end\", 113, _max_sequence_idle_ms / 2)),\n self.get_expected_result(448, 113, trial, None),\n precreated_shm2_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1004,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 1111, None), (None, 1112,\n _max_sequence_idle_ms / 2),\n (None, 1112, _max_sequence_idle_ms / 2),\n (\"end\", 1113, _max_sequence_idle_ms / 2)),\n self.get_expected_result(4448, 1113, trial, None),\n precreated_shm3_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1005,\n (None, None),\n # (flag_str, value, pre_delay_ms)\n ((\"start\", 11111, None), (\"end\", 11113, None)),\n self.get_expected_result(22224, 11113, trial,\n \"end\"),\n precreated_shm4_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n\n threads[0].start()\n threads[1].start()\n threads[2].start()\n threads[3].start()\n time.sleep(2)\n threads[4].start()\n for t in threads:\n t.join()\n\n self.check_deferred_exception()\n self.assertTrue(False, \"expected error\")\n except Exception as ex:\n for prefix in ENSEMBLE_PREFIXES:\n if model_name.startswith(prefix):\n base_model_name = 
model_name[(len(prefix)):]\n self.assertTrue(ex.message().startswith(\n str(\"in ensemble '{}', \" +\n \"inference request for sequence 1001 to \" +\n \"model '{}' must specify the START flag on the first \"\n + \"request of the sequence\").format(\n model_name, base_model_name)))\n return\n self.assertTrue(ex.message().startswith(\n str(\"inference request for sequence 1001 to \" +\n \"model '{}' must specify the START flag on the first \" +\n \"request of the sequence\").format(model_name)))\n finally:\n if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:\n self.cleanup_shm_regions(precreated_shm0_handles)\n self.cleanup_shm_regions(precreated_shm1_handles)\n self.cleanup_shm_regions(precreated_shm2_handles)\n self.cleanup_shm_regions(precreated_shm3_handles)\n self.cleanup_shm_regions(precreated_shm4_handles)\n\n def test_queue_delay_no_min_util(self):\n # Test model that have set max queue delay but minimum slot utilization\n # is 0. Send 2 sequences in parallel and make sure they get completely\n # batched into batch-size 2 inferences. The first sequence only has one\n # request while the second sequence has two, so expecting the second\n # execution to be a batch of 'null, seq 2'. 
The executions should not be\n # waited.\n\n for trial in _trials:\n is_ensemble = False\n for prefix in ENSEMBLE_PREFIXES:\n if prefix in trial:\n is_ensemble = True\n break\n if is_ensemble:\n continue\n self.clear_deferred_exceptions()\n dtype = self.get_datatype(trial)\n precreated_shm0_handles = self.precreate_register_regions((1,),\n dtype, 0)\n precreated_shm1_handles = self.precreate_register_regions((11, 12),\n dtype, 1)\n try:\n model_name = tu.get_sequence_model_name(trial, dtype)\n\n self.check_setup(model_name)\n\n # Need scheduler to wait for queue to contain 2 sequences.\n self.assertTrue(\"TRITONSERVER_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_DELAY_SCHEDULER\"]), 2)\n self.assertTrue(\n \"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\"]), 0)\n\n threads = []\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1001,\n (2000, None),\n # (flag_str, value, pre_delay_ms)\n (\n (\"start\", 1, None),),\n self.get_expected_result(1, 1, trial, \"start\"),\n precreated_shm0_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1002,\n (2000, None),\n # (flag_str, value, pre_delay_ms)\n (\n (\"start\", 11, None),\n (None, 12, None),\n ),\n self.get_expected_result(23, 12, trial, None),\n precreated_shm1_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n\n threads[0].start()\n time.sleep(1)\n threads[1].start()\n for t in threads:\n t.join()\n\n self.check_deferred_exception()\n self.check_status(model_name, {2: 2}, 2, 3)\n except Exception as ex:\n self.assertTrue(False, \"unexpected error {}\".format(ex))\n finally:\n if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:\n 
self.cleanup_shm_regions(precreated_shm0_handles)\n self.cleanup_shm_regions(precreated_shm1_handles)\n\n def test_queue_delay_half_min_util(self):\n # Test model that have set max queue delay but minimum slot utilization\n # is 0.5. Send 2 sequences in parallel and make sure they get completely\n # batched into batch-size 2 inferences. The first sequence only has one\n # request while the second sequence has two, so expecting the second\n # execution to be a batch of 'null, seq 2'. The second execution should\n # be waited until the max queue delay is exceeded for sequence 2.\n\n for trial in _trials:\n is_ensemble = False\n for prefix in ENSEMBLE_PREFIXES:\n if prefix in trial:\n is_ensemble = True\n break\n if is_ensemble:\n continue\n self.clear_deferred_exceptions()\n dtype = self.get_datatype(trial)\n precreated_shm0_handles = self.precreate_register_regions((1,),\n dtype, 0)\n precreated_shm1_handles = self.precreate_register_regions((11, 12),\n dtype, 1)\n try:\n model_name = tu.get_sequence_model_name(trial, dtype) + \"_half\"\n\n self.check_setup(model_name)\n\n # Need scheduler to wait for queue to contain 2 sequences.\n self.assertTrue(\"TRITONSERVER_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_DELAY_SCHEDULER\"]), 2)\n self.assertTrue(\n \"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\"]), 0)\n\n threads = []\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1001,\n (2000, None),\n # (flag_str, value, pre_delay_ms)\n (\n (\"start\", 1, None),),\n self.get_expected_result(1, 1, trial, \"start\"),\n precreated_shm0_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1002,\n (4000, 3000),\n # (flag_str, value, 
pre_delay_ms)\n (\n (\"start\", 11, None),\n (None, 12, None),\n ),\n self.get_expected_result(23, 12, trial, None),\n precreated_shm1_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n\n threads[0].start()\n time.sleep(1)\n threads[1].start()\n for t in threads:\n t.join()\n\n self.check_deferred_exception()\n self.check_status(model_name, {2: 2}, 2, 3)\n except Exception as ex:\n self.assertTrue(False, \"unexpected error {}\".format(ex))\n finally:\n if TEST_SYSTEM_SHARED_MEMORY or TEST_CUDA_SHARED_MEMORY:\n self.cleanup_shm_regions(precreated_shm0_handles)\n self.cleanup_shm_regions(precreated_shm1_handles)\n\n def test_queue_delay_full_min_util(self):\n # Test model that have set max queue delay but minimum slot utilization\n # is 1. Send 2 sequences in parallel and make sure they get completely\n # batched into batch-size 2 inferences. The first sequence only has one\n # request while the second sequence has two, so expecting the second\n # execution to be a batch of 'null, seq 2'. 
Both executions should be\n # waited until the max queue delay is exceeded.\n\n for trial in _trials:\n is_ensemble = False\n for prefix in ENSEMBLE_PREFIXES:\n if prefix in trial:\n is_ensemble = True\n break\n if is_ensemble:\n continue\n self.clear_deferred_exceptions()\n dtype = self.get_datatype(trial)\n precreated_shm0_handles = self.precreate_register_regions((1,),\n dtype, 0)\n precreated_shm1_handles = self.precreate_register_regions((11, 12),\n dtype, 1)\n try:\n model_name = tu.get_sequence_model_name(trial, dtype) + \"_full\"\n\n self.check_setup(model_name)\n\n # Need scheduler to wait for queue to contain 2 sequences.\n self.assertTrue(\"TRITONSERVER_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_DELAY_SCHEDULER\"]), 2)\n self.assertTrue(\n \"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\" in os.environ)\n self.assertEqual(\n int(os.environ[\"TRITONSERVER_BACKLOG_DELAY_SCHEDULER\"]), 0)\n\n threads = []\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1001,\n (4000, 3000),\n # (flag_str, value, pre_delay_ms)\n (\n (\"start\", 1, None),),\n self.get_expected_result(1, 1, trial, \"start\"),\n precreated_shm0_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n threads.append(\n threading.Thread(\n target=self.check_sequence_async,\n args=(\n trial,\n model_name,\n dtype,\n 1002,\n (6000, 5000),\n # (flag_str, value, pre_delay_ms)\n (\n (\"start\", 11, None),\n (None, 12, 2000),\n ),\n self.get_expected_result(23, 12, trial, None),\n precreated_shm1_handles),\n kwargs={\n 'sequence_name': \"{}\".format(self._testMethodName)\n }))\n\n threads[0].start()\n time.sleep(1)\n threads[1].start()\n for t in threads:\n t.join()\n\n self.check_deferred_exception()\n self.check_status(model_name, {2: 2}, 2, 3)\n except Exception as ex:\n self.assertTrue(False, \"unexpected error {}\".format(ex))\n finally:\n if TEST_SYSTEM_SHARED_MEMORY or 
TEST_CUDA_SHARED_MEMORY:\n self.cleanup_shm_regions(precreated_shm0_handles)\n self.cleanup_shm_regions(precreated_shm1_handles)\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] | [
[
"numpy.dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
aaronique/whisky-recommender-app | [
"35c33aad472561433eedfa252e8dff5d4772b516"
] | [
"eda/eda.py"
] | [
"import os\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans, MeanShift, DBSCAN\n\nPROFILE_DATA = os.path.abspath('data/profile.csv')\n\ndf = pd.read_csv(PROFILE_DATA)\n\nprofile_col_names = ['smoky', 'peaty', 'spicy', 'herbal', 'oily', 'full', 'rich', 'sweet', 'briny', 'salty', 'vanilla', 'tart', 'fruity', 'floral']\n\nX = np.array(df[profile_col_names])\n\n# PCA\npca = PCA(n_components=2)\n_ = pca.fit(X)\n\n# clustering\n\ncluster = KMeans(n_clusters=5, random_state=0).fit(X)\n# cluster = MeanShift(bandwidth=2).fit(X)\n# cluster = DBSCAN(eps=5, min_samples=2).fit(X)\n\ndf['cluster'] = cluster.labels_\ndf = df[['id', 'name', 'cluster'] + profile_col_names]\n"
] | [
[
"numpy.array",
"pandas.read_csv",
"sklearn.decomposition.PCA",
"sklearn.cluster.KMeans"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
lakehanne/FormationControl | [
"0d13f641a7fa3720a5b5f38fa957472b68522258"
] | [
"balls_detector/src/tracker.py"
] | [
"#!/usr/bin/env python\n\n'''\nwe subscribe to the published centroids (/locs/tagged & /locs/detected)\nand rearrange the new centroids according to a bijective nearest neighbor rule,\nthen publish the arranged data to a new topic (/locs/ordered)\n\nAuthors:\n 1. Sleiman Safaoui\n 2. Kaveh Fathian\n 3. Olalekan Ogunmolu\n\nJuly 16, 2017\n'''\n\nfrom __future__ import print_function\nimport numpy as np\nimport sys\n\n#ros imports\nimport rospy\nimport roslib\nroslib.load_manifest('balls_detector')\nfrom std_msgs.msg import Float64MultiArray\n\n\nclass TagsGrabber(object):\n '''\n We get the correctly ordered centroids published after tagger.cpp runs, and\n set that order to be the default order (based on which we will track)\n '''\n self.tags_new = []\n def __init__(self):\n self.tag_sub = rospy.Subscriber(\"/locs/tagged\", Float64MultiArray,\n self.callbackT, queue_size=1)\n\n def callbackT(self, data):\n self.tags_new = data;\n\n def get_tags_new(self):\n return self.tags_new\n\nclass PosGrabber(object):\n '''\n We get the newly published centroids.\n '''\n self.pos_new = []\n def __init__(self):\n self.pos_sub = rospy.Subscriber(\"/locs/detected\", Float64MultiArray,\n self.callbackP, queue_size=1)\n\n def callbackP(self, data):\n self.pos_new = data\n\n def get_pos_new(self):\n return self.pos_new\n\nclass Tracker(TagsGrabber, PosGrabber):\n '''\n We rearrange the newly detected centroids to maintain the order of the robots\n as detected in the first frame.\n '''\n def __init__(self):\n TagsGrabber.__init__(self)\n PosGrabber.__init__(self)\n self.pos_pub = rospy.Publisher(\"/locs/ordered\", Float64MultiArray,\n queue_size = 100)\n\n def delay(self, pos_old, pos_ordered): # delay ordering to allow for proper initialization\n pos_ordered = self.get_tags_new()\n pos_old = pos_ordered\n rospy.sleep(0.2)\n return pos_old, pos_ordered\n\n def order(self, first, pos_old, pos_ordered):\n pos_new = self.get_pos_new()\n n_old = len(pos_old.data)/2\n n_new = 
len(pos_new.data)/2\n if n_new >= n_old: # no occluded robot(s)\n pos_ordered = self.nn_search( n_old, n_new, pos_old, pos_ordered )#######\n else: # robot(s) occluded keep pos_old (nothing new to publish)\n pos_ordered = pos_old\n pos_old = pos_ordered # set ordered pos to be old ones after publishing\n return (pos_old, pos_ordered)\n\n def nn_search(self, n_old, n_new, pos_old, pos_ordered ):\n dist = np.zeros(n_old*n_new) # distance between points\n idx = np.zeros((2,n_old*n_new)) # index of pairs of points (old and new) corresponding to distances in dist\n pt_idx = np.zeros(n_old) # indicies of pos_new matched to pos_old in the latter's order\n\n # find distance between all pairs\n itr = 0\n for i in range(0,n_old):\n for j in range(0,n_new):\n dist[itr] = ( (pos_old.data[int(2*i)] - self.pos_new.data[int(2*j)])**2 +\n (pos_old.data[int(2*i+1)] - self.pos_new.data[int(2*j+1)])**2 )\n idx[0][itr] = i\n idx[1][itr] = j\n itr +=1\n pos_ordered_temp = np.zeros(int(2*n_old)) # temporary array to store pos_ordered\n\n # order according to bijective nearest neighbor search\n for i in range(0, n_old):\n s_idx = sorted(range(len(dist)), key=lambda k: dist[k]) # ascending oreder sort index\n new_pt_idx = idx[1][s_idx[0]] # index of new point with shortest distance\n old_pt_idx = idx[0][s_idx[0]] # index of corresponding old point (with shortest distance)\n # points pos_new(new_pt_idx) and pos_old(old_pt_idx) are paired as being the closes neighbors\n\n # store pos_new(new_pt_idx) at the position old_pt_idx\n # (thus, rearranging the order of pos_new to be the same as pos_old)\n pos_ordered_temp[int(old_pt_idx*2)] = self.pos_new.data[int(new_pt_idx*2)]\n pos_ordered_temp[int(old_pt_idx*2+1)] = self.pos_new.data[int(new_pt_idx*2+1)]\n pt_idx[i] = new_pt_idx\n\n # remove all pairs in idx and dist that correspond to\n # pos_new(new_pt_idx) or pos_old(old_pt_idx)\n rem_idx = np.zeros(len(dist))\n for j in range(len(rem_idx)):\n rem_idx[j] = (idx[1][j] == new_pt_idx) or 
(idx[0][j] == old_pt_idx)\n for j in range(len(rem_idx)-1, -1, -1):\n if rem_idx[j] == 1:\n idx = np.delete(idx, j, 1)\n dist = np.delete(dist, j)\n\n pos_ordered.data = pos_ordered_temp\n self.pos_pub.publish(pos_ordered)\n return pos_ordered\n\ndef main():\n rospy.init_node(\"tracked_pos\")\n rate = rospy.Rate(30)\n tr = Tracker()\n num_rob = rospy.get_param(\"/tracker/Rob_Params/num_rob\")\n first = 30*(3 + 2*num_rob) # wait loops for proper initialization of pos_old and pos_ordered\n pos_old = []\n pos_ordered = []\n try:\n while not rospy.is_shutdown():\n if not (first == 0):\n (pos_old, pos_ordered) = tr.delay(pos_old, pos_ordered)\n first -=1\n else: # initialization of pos_old and pos_ordered complete --> track\n (pos_old, pos_ordered) = tr.order(first, pos_old, pos_ordered)\n rate.sleep()\n except KeyboardInterrupt:\n print(\"shutting down ros\")\n\nif __name__ == '__main__':\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n"
] | [
[
"numpy.delete",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ppmlguy/fastgradclip | [
"0d8bff42ab13fa3471c520a2823050ccf0ff4a21"
] | [
"fastgc/layers/norm.py"
] | [
"import torch\nimport torch.nn as nn\n\n\nclass InstanceNorm(nn.Module):\n def __init__(self, num_features, eps=1e-5, affine=True):\n super(InstanceNorm, self).__init__()\n self.num_features = num_features\n self.eps = eps\n self.affine = affine\n\n if self.affine:\n self.weight = nn.Parameter(torch.Tensor(1, num_channels, 1, 1))\n self.bias = nn.Parameter(torch.Tensor(1, num_channels, 1, 1))\n else:\n self.register_parameter('weight', None)\n self.register_parameter('bias', None)\n\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.affine:\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, x):\n N, C, H, W = x.shape\n\n x = x.view(N, C, -1)\n mean = x.mean(-1, keepdim=True)\n var = x.var(-1, keepdim=True)\n\n normalized = (x - mean) / (var+self.eps).sqrt()\n self.layer_input = normalized.view(N, C, H, W)\n self.pre_activation = x * self.weight + bias\n\n return self.pre_activation\n\n def per_example_gradient(self, deriv_pre_activ):\n N, C = deriv_pre_activ.size(0), deriv_pre_activ.size(1)\n\n dLdZ = deriv_pre_activ\n dLdZ *= N\n X = self.layer_input\n\n pe_grad_weight = (dLdZ * X).view(N, C, -1).sum(-1)\n pe_grad_bias = dLdZ.view(N, C, -1).sum(-1)\n\n return pe_grad_weight, pe_grad_bias\n\n def pe_grad_sqnorm(self, deriv_pre_activ):\n pe_grad_weight, pe_grad_bias = self.per_example_gradient(deriv_pre_activ)\n\n return pe_grad_weight.pow(2).sum(dim=1) + pe_grad_bias.pow(2).sum(dim=1)\n \n\nclass GroupNorm(nn.Module):\n def __init__(self, num_groups, num_channels, eps=1e-5, affine=True): \n super(GroupNorm, self).__init__()\n self.pre_activation = None\n self.layer_input = None\n\n self.num_groups = num_groups\n self.num_channels = num_channels\n self.eps = eps\n self.affine = affine\n if self.affine:\n self.weight = nn.Parameter(torch.Tensor(1, num_groups, 1, 1))\n self.bias = nn.Parameter(torch.Tensor(1, num_groups, 1, 1))\n else:\n self.register_parameter('weight', None)\n self.register_parameter('bias', None)\n 
self.reset_parameters()\n\n def reset_parameters(self):\n if self.affine:\n init.ones_(self.weight)\n init.zeros_(self.bias)\n\n def forward(self, x):\n N, C, H, W = x.shape\n G = self.num_groups\n\n x = x.view(N, G, -1)\n mean = x.mean(-1, keepdim=True)\n var = x.var(-1, keepdim=True)\n\n normalized = (x - mean) / (var + self.eps).sqrt()\n self.layer_input = normalized.view(N, C, H, W)\n self.pre_activation = self.weight * self.layer_input + self.bias\n\n return self.pre_activation\n\n def per_example_gradient(self, deriv_pre_activ):\n N = deriv_pre_activ.size(0)\n G = self.num_groups\n\n dLdZ = deriv_pre_activ\n dLdZ *= N\n X = self.layer_input\n\n pe_grad_weight = (dLdZ * X).view(N, G, -1).sum(-1)\n pe_grad_bias = dLdZ.view(N, G, -1).sum(-1)\n\n return pe_grad_weight, pe_grad_bias\n\n def pe_grad_sqnorm(self, deriv_pre_activ):\n pe_grad_weight, pe_grad_bias = self.per_example_gradient(deriv_pre_activ)\n\n return pe_grad_weight.pow(2).sum(dim=1) + pe_grad_bias.pow(2).sum(dim=1)\n\n"
] | [
[
"torch.Tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gundamMC/animius | [
"911712ddb992a5f9b00e5a00299a55baf762a9c5"
] | [
"animius/Utils.py"
] | [
"import json\nfrom os.path import join\n\nimport numpy as np\nimport psutil\nimport pynvml\nimport tensorflow as tf\nfrom tensorflow.python.tools import freeze_graph as tf_freeze_graph\nfrom tensorflow.python.tools import optimize_for_inference_lib\n\n\ndef get_system_info():\n system_info = dict()\n\n # cpu info\n system_info['cpu_percent'] = psutil.cpu_percent(interval=None, percpu=False)\n system_info['cpu_count'] = psutil.cpu_count(logical=True)\n\n # memory info\n mem = psutil.virtual_memory()\n system_info['mem_total'] = int(mem.total / 1024 / 1024)\n system_info['mem_available'] = int(mem.available / 1024 / 1024)\n system_info['mem_percent'] = mem.percent\n\n # disk info\n disk = psutil.disk_usage('/')\n system_info['disk_total'] = int(disk.total / 1024 / 1024)\n system_info['disk_used'] = int(disk.used / 1024 / 1024)\n system_info['disk_percent'] = disk.percent\n\n # other info\n system_info['boot_time'] = psutil.boot_time()\n\n # gpu info\n if tf.test.is_gpu_available():\n pynvml.nvmlInit()\n gpu_driver_version = pynvml.nvmlSystemGetDriverVersion()\n system_info['gpu_driver_version'] = gpu_driver_version.decode(\"utf-8\")\n\n gpu_device_count = pynvml.nvmlDeviceGetCount()\n\n system_info['gpu_device_list'] = []\n for i in range(gpu_device_count):\n handle = pynvml.nvmlDeviceGetHandleByIndex(i)\n gpu_name = pynvml.nvmlDeviceGetName(handle)\n gpu_mem = pynvml.nvmlDeviceGetMemoryInfo(handle)\n gpu_mem_total = int(gpu_mem.total / 1024 / 1024)\n gpu_mem_used = int(gpu_mem.used / 1024 / 1024)\n gpu_mem_percent = int(gpu_mem_used / gpu_mem_total)\n\n system_info['gpu_device_list'].append(\n {'gpu_name': gpu_name.decode(\"utf-8\"),\n 'gpu_mem_total': gpu_mem_total,\n 'gpu_mem_used': gpu_mem_used,\n 'gpu_mem_percent': gpu_mem_percent\n }\n )\n\n pynvml.nvmlShutdown()\n\n return system_info\n\n\ndef shuffle(data_lists):\n permutation = list(np.random.permutation(data_lists[0].shape[0]))\n result = []\n for data in data_lists:\n result.append(data[permutation])\n 
return result\n\n\ndef get_mini_batches(data_lists, mini_batch_size):\n m = data_lists[0].shape[0]\n mini_batches = []\n\n mini_batch_number = int(m / float(mini_batch_size))\n\n for data in data_lists:\n tmp = []\n for batch in range(0, mini_batch_number):\n tmp.append(data[batch * mini_batch_size: (batch + 1) * mini_batch_size])\n\n if m % mini_batch_size != 0:\n tmp.append(data[mini_batch_number * mini_batch_size:])\n\n mini_batches.append(tmp)\n\n return mini_batches\n\n\ndef get_length(sequence):\n used = tf.sign(tf.abs(sequence))\n # reducing the features to scalars of the maximum\n # and then converting them to \"1\"s to create a sequence mask\n # i.e. all \"sequence length\" with \"input length\" values are converted to a scalar of 1\n\n length = tf.reduce_sum(used, reduction_indices=1) # get length by counting how many \"1\"s there are in the sequence\n length = tf.cast(length, tf.int32)\n return length\n\n\ndef sentence_to_index(sentence, word_to_index, max_seq=20, go=False, eos=False):\n if go:\n result = [word_to_index[\"<GO>\"]]\n length = 1\n else:\n result = []\n length = 0\n unk = 0\n for word in sentence:\n length += 1\n if word in word_to_index:\n result.append(word_to_index[word])\n else:\n result.append(word_to_index[\"<UNK>\"])\n unk += 1\n\n if length >= max_seq:\n if eos:\n length = max_seq - 1\n else:\n length = max_seq\n\n result = set_sequence_length(result, word_to_index[\"<EOS>\"], max_seq, force_eos=eos)\n\n return result, length, unk\n\n\ndef set_sequence_length(sequence, pad, max_seq=20, force_eos=False):\n if len(sequence) < max_seq:\n sequence.extend([pad] * (max_seq - len(sequence)))\n\n if force_eos:\n sequence = sequence[:max_seq - 1]\n sequence.append(pad)\n else:\n sequence = sequence[:max_seq]\n\n return sequence\n\n\n# pass model_dir and model_name if model is not loaded\ndef freeze_graph(model, output_node_names, model_dir=None, model_name=None):\n stored = None\n\n if model is not None:\n config = model.config\n model_dir = 
model.saved_directory\n model_name = model.saved_name\n else:\n with open(join(model_dir, model_name + '.json'), 'r') as f:\n stored = json.load(f)\n config = stored['config']\n\n if 'graph' not in config:\n raise ValueError('No graph found. Save the model with graph=True')\n\n # Retrieve latest checkpoint\n checkpoint = tf.train.get_checkpoint_state(model_dir)\n input_checkpoint = checkpoint.model_checkpoint_path\n\n input_graph = config['graph']\n output_graph = join(model_dir, \"frozen_model.pb\")\n\n clear_devices = True\n\n tf_freeze_graph.freeze_graph(input_graph, None, True,\n input_checkpoint, output_node_names,\n \"\", \"\", output_graph, clear_devices, \"\",\n input_meta_graph=input_checkpoint + \".meta\"\n )\n\n config['frozen_graph'] = output_graph\n\n # save frozen graph location\n with open(join(model_dir, model_name + '.json'), 'w') as f:\n if model is not None:\n json.dump({\n 'config': model.config,\n 'model_structure': model.model_structure,\n 'hyperparameters': model.hyperparameters\n }, f, indent=4)\n else:\n json.dump(stored, f, indent=4)\n\n return output_graph # output graph path\n\n\n# WARNING: optimizing models seem to produce an invalid graph. Don't use it.\n# See: https://github.com/tensorflow/tensorflow/issues/19838\ndef optimize(model, input_node_names, output_node_names, model_dir=None, model_name=None):\n import warnings\n\n warnings.warn('WARNING: Optimizing models seem to produce an invalid graph and should not be used. '\n 'If you wish to undo this, simply remove the \\'optimized_graph\\' line in the config file')\n\n stored = None\n\n if model is not None:\n config = model.config\n model_dir = model.saved_directory\n model_name = model.saved_name\n else:\n with open(join(model_dir, model_name + '.json'), 'r') as f:\n stored = json.load(f)\n config = stored['config']\n\n if 'frozen_graph' in config:\n frozen_graph = config['frozen_graph']\n else:\n if 'graph' not in config:\n raise ValueError('No graph found. 
Save the model with graph=True')\n else: # the model is not frozen\n frozen_graph = freeze_graph(None, ', '.join(output_node_names), model_dir=model_dir, model_name=model_name)\n config['frozen_graph'] = frozen_graph\n\n inputGraph = tf.GraphDef()\n with tf.gfile.Open(frozen_graph, \"rb\") as f:\n data2read = f.read()\n inputGraph.ParseFromString(data2read)\n\n output_graph = optimize_for_inference_lib.optimize_for_inference(\n inputGraph,\n input_node_names, # an array of the input node(s)\n output_node_names, # an array of output nodes\n tf.int32.as_datatype_enum)\n\n # Save the optimized graph\n tf.train.write_graph(output_graph, model_dir, 'optimized_graph.pb', as_text=False)\n\n # save optimized graph location\n config['optimized_graph'] = join(model_dir, 'optimized_graph.pb')\n\n with open(join(model_dir, model_name + '.json'), 'w') as f:\n if model is not None:\n json.dump({\n 'config': model.config,\n 'model_structure': model.model_structure,\n 'hyperparameters': model.hyperparameters\n }, f, indent=4)\n else:\n json.dump(stored, f, indent=4)\n\n return join(model_dir, 'optimized_graph.pb') # output graph path\n"
] | [
[
"tensorflow.train.get_checkpoint_state",
"tensorflow.python.tools.freeze_graph.freeze_graph",
"tensorflow.gfile.Open",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.python.tools.optimize_for_inference_lib.optimize_for_inference",
"tensorflow.abs",
"numpy.random.permutation",
"tensorflow.test.is_gpu_available",
"tensorflow.GraphDef",
"tensorflow.train.write_graph"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
jbustospelegri/breast_cancer_diagnosis | [
"38eb990ef716912c6acabb443e6eb5c21d9b4e0d"
] | [
"src/algorithms/classification.py"
] | [
"import numpy as np\n\nfrom typing import Callable, io, Union, Tuple\nfrom time import process_time\n\nfrom tensorflow.keras import Sequential, optimizers, callbacks\nfrom tensorflow.keras.backend import count_params, eval\nfrom tensorflow.keras.callbacks import History\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import L2\nfrom tensorflow.keras.constraints import max_norm\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.applications import resnet50, densenet, vgg16, inception_v3\nfrom tensorflow.python.keras.preprocessing.image import DataFrameIterator\nfrom tensorflow.keras.layers import (\n Conv2D, Dropout, MaxPooling2D, Dense, GlobalAveragePooling2D, Input, Flatten\n)\n\nfrom segmentation_models import get_preprocessing\n\nfrom utils.functions import get_path\nfrom utils.config import CLASSIFICATION_LOSS\n\n\nclass GeneralModel:\n \"\"\"\n\n Modelo general para crear nuevas estructuras de red a partir de redes neuronales ya existentes. 
Adicionalmente\n se incorporan metodos para realizar el ajuste fino de parámetros modificando el número de capas a entrentar.\n Esta clase deberá ser heredada por el resto de clases\n\n \"\"\"\n\n __name__ = 'GeneralModel'\n model: Model = None\n callbakcs = {}\n loss = CLASSIFICATION_LOSS\n metrics = ['accuracy']\n history: History = None\n shape = (224, 224, 3)\n LAYERS_DICT = {\n '0FT': [],\n '1FT': ['block3_maxpool1', 'block3_maxpool2', 'block3_conv1'],\n '2FT': ['block2_maxpool', 'block2_conv1'],\n '3FT': ['block1_maxpool', 'block1_conv1'],\n '4FT': []\n }\n BS_DICT = {\n '0FT': 32,\n '1FT': 32,\n '2FT': 32,\n '3FT': 32,\n '4FT': 32\n }\n\n def __init__(self, n: int = 1, baseline: Model = None, preprocess_func: Callable = None, top_fc: str = 'simple'):\n self.baseline = baseline if baseline is not None else self.create_baseline()\n self.n = n\n self.preprocess_func = preprocess_func\n self.create_model(top_fc)\n\n @staticmethod\n def create_baseline():\n \"\"\"\n Función que permite crear una estructura básica compuesta por un conjunto de capas convolucionales. Este metodo\n será sobreescrito por las clases heredadas.\n \"\"\"\n baseline = Sequential()\n\n baseline.add(Conv2D(16, (5, 5), strides=(1, 1), padding=\"valid\", activation='relu', name='block1_conv1'))\n baseline.add(MaxPooling2D(pool_size=(2, 2), name='block1_maxpool'))\n\n baseline.add(Conv2D(16, (5, 5), strides=(1, 1), padding=\"valid\", activation='relu', name='block2_conv1'))\n baseline.add(MaxPooling2D(pool_size=(2, 2), name='block2_maxpool'))\n\n baseline.add(Conv2D(14, (3, 3), strides=(1, 1), padding=\"same\", activation='relu', name='block3_conv1'))\n baseline.add(MaxPooling2D(pool_size=(2, 2), name='block3_maxpool1'))\n baseline.add(MaxPooling2D(pool_size=(2, 2), name='block3_maxpool2'))\n\n return baseline\n\n def create_model(self, fc_type: str = 'simple'):\n \"\"\"\n Función utilizada para crear la estructura de un modelo. 
Esta, estará formada por la estructura básica devuelta\n por self.baseline juntamente con las top layers definidas en la función.\n La capa de salida estará compuesta por una capa de salida FC con tantas neuronas como clases existan en el\n dataset y con función de activación softmax.\n\n :param fc_type: acepta los parámetros simple o complex para diferenciar la estructura de las top-layers\n utilzadas.\n \"\"\"\n\n # Entrada del modelo\n input_ = Input(shape=self.shape)\n\n # Baseline\n x = self.baseline(input_, training=False)\n\n # Test con extract features y sigmoide (estructura simple)\n if fc_type == 'simple':\n x = GlobalAveragePooling2D()(x)\n x = Dropout(0.2)(x)\n\n # Test añadiendo capas fully connected (estructura compleja)\n elif fc_type == 'complex':\n x = Flatten()(x)\n x = Dense(512, activation='relu')(x)\n x = Dense(128, activation='relu')(x)\n x = Dense(64, activation='relu', kernel_constraint=max_norm(3))(x)\n x = Dropout(0.25)(x)\n x = Dense(32, activation='relu', kernel_constraint=max_norm(3), activity_regularizer=L2(l2=0.01))(x)\n\n else:\n raise ValueError('Incorrect fc type param')\n\n # Capa de salida de la red\n output = Dense(self.n, activation='softmax')(x)\n\n # Se crea el modelo uniendo todas las capas\n self.model = Model(inputs=input, outputs=output)\n\n def set_trainable_layers(self, unfrozen_layers: str):\n \"\"\"\n Función utilizada para setear layers del modelo como entrenables.\n\n :param unfrozen_layers: string que indica los bloques de capas a descongelar juntamente con todos los bloques\n anteriores en orden alfanumerico. 
Loas parámetros aceptados son las keys del parámetro self.LAYERS_DICT\n \"\"\"\n\n # Se setean todas las capas a trainable para despues ponerlas en estado no entrenable.\n self.baseline.trainable = True\n\n # Si el modelo se entrena por completo, no se realiza ninguna acción\n if unfrozen_layers == 'ALL':\n pass\n\n # Por el contrario se congelarán todas las capas paramétrizadas en self.LAYERS_DICT\n elif unfrozen_layers in self.LAYERS_DICT.keys():\n # Se ordenan las keys de forma alganumerica\n list_keys = sorted(self.LAYERS_DICT.keys(), key=lambda x: int(x[0]))\n # Se recuperan todas las capas presentes en los values del diccionario cuyas keys sean anteriores o iguales\n # al valor de unfrozen_layers\n train_layers = \\\n [l for layers in list_keys[:list_keys.index(unfrozen_layers) + 1] for l in self.LAYERS_DICT[layers]]\n\n # Se itera generada la lista para congelar los pesos de cada capa.\n for layer in self.baseline.layers:\n if layer.name not in train_layers:\n layer.trainable = False\n\n else:\n raise ValueError(f'Unfrozen layers parametrization for {unfrozen_layers}')\n\n def start_train(self, train_data: DataFrameIterator, val_data: DataFrameIterator, epochs: int,\n opt: optimizers = Adam(1e-3), unfrozen_layers: str = 'ALL') -> Tuple[float, int]:\n \"\"\"\n Función que compila el modelo, añade los callbacks definidos por el usuario y entrena el modelo\n :param train_data: dataframe iterator con los datos de train\n :param val_data: dataframe iterator con los datos de validación\n :param epochs: número de épocas con las que realizar el entrenamiento\n :param unfrozen_layers: key de self.LAYERS_DICT indicando las capas a congelar\n :param opt: algorítmo de optimización de gradiente descendente\n :return tiempo de entrenamiento y el número de épocas utilizadas para entrenar el modelo\n \"\"\"\n\n # Se configura si los layers serán entrenables o no\n self.set_trainable_layers(unfrozen_layers=unfrozen_layers)\n\n # Se compila el modelo\n 
self.model.compile(optimizer=opt, loss=self.loss, metrics=self.metrics)\n\n # Enternamiento del modelo\n start = process_time()\n self.history = self.model.fit(\n train_data,\n epochs=epochs,\n validation_data=val_data,\n verbose=2,\n callbacks=list(self.callbakcs.values())\n )\n return process_time() - start, len(self.history.history['loss'])\n\n def register_metric(self, *args: Union[Callable, str]):\n \"\"\"\n Función para registrar las métricas a representar durante el entrenamiento del modelo\n :param args: lista con métricas a representar. Se acepta una función o bien un string de sklearn.models.metrics.\n \"\"\"\n for arg in args:\n self.metrics.append(arg)\n\n def register_callback(self, **kargs: callbacks):\n \"\"\"\n Función para registrar los callbacks a ejecutar durante el entrenamiento del modelo.\n :param kargs: Diccionario con un nombre de callback y una función\n \"\"\"\n self.callbakcs = {**self.callbakcs, **kargs}\n\n def train_from_scratch(self, train_data: DataFrameIterator, val_data: DataFrameIterator, epochs: int,\n opt: optimizers = None) -> Tuple[float, int]:\n \"\"\"\n Función para entrenar completamente un modelo\n :param train_data: dataframe iterator con los datos de train\n :param val_data: dataframe iterator con los datos de validación\n :param epochs: número de épocas con las que realizar el entrenamiento\n :param opt: algorítmo de optimización de gradiente descendente\n :return tiempo de entrenamiento y el número de épocas utilizadas para entrenar el modelo\n \"\"\"\n t, e = self.start_train(train_data, val_data, epochs, opt, unfrozen_layers='ALL')\n return t, e\n\n def extract_features(self, train_data, val_data, epochs: int, opt: optimizers = None) -> Tuple[float, int]:\n \"\"\"\n Función utilizada para aplicar un proceso de extract features de modo que se conjela la arquitectura definida en\n self.baseline y se entrenan las últimas capas de la arquitectura.\n :param train_data: dataframe iterator con los datos de train\n :param 
val_data: dataframe iterator con los datos de validación\n :param epochs: número de épocas con las que realizar el entrenamiento\n :param opt: algorítmo de optimización de gradiente descendente\n :return tiempo de entrenamiento y el número de épocas utilizadas para entrenar el modelo\n \"\"\"\n t, e = self.start_train(train_data, val_data, epochs, opt, unfrozen_layers='0FT')\n return t, e\n\n def fine_tunning(self, train_data, val_data, epochs: int, opt: optimizers = None, unfrozen_layers: str = '1FT') \\\n -> Tuple[float, int]:\n \"\"\"\n Función utilizada para aplicar un proceso de transfer learning de modo que se conjelan n - k capas. Las k capas\n entrenables en la arquitectura definida por self.baseline se determinarán a partir del método\n set_trainable_layers.\n :param train_data: dataframe iterator con los datos de train\n :param val_data: dataframe iterator con los datos de validación\n :param epochs: número de épocas con las que realizar el entrenamiento\n :param opt: algorítmo de optimización de gradiente descendente\n :param unfrozen_layers: key de self.LAYERS_DICT indicando las capas a congelar\n :return tiempo de entrenamiento y el número de épocas utilizadas para entrenar el modelo\n \"\"\"\n t, e = self.start_train(train_data, val_data, epochs, opt, unfrozen_layers=unfrozen_layers)\n return t, e\n\n def save_weights(self, dirname: str, model_name: str):\n \"\"\"\n Función utilizada para almacenar los pesos del modelo entrenado\n :param dirname: directorio en el cual se almacenara el modelocv\n :param model_name: nombre del archivo para almacenar el modelo\n \"\"\"\n # Se congelan las capas de forma previa a almacenar el modelo.\n self.freeze_layers()\n # Se almacenan los pesos\n self.model.save_weights(get_path(dirname, model_name))\n\n def load_weigths(self, weights: io):\n self.freeze_layers()\n self.model.load_weights(weights)\n\n def predict(self, *args, **kwargs):\n \"\"\"\n Función utilizada para generar las predicciones de un conjunto de 
datos dado\n \"\"\"\n return self.model.predict(*args, **kwargs)\n\n def get_trainable_layers(self) -> int:\n \"\"\"\n Función para recuperar el número de capas entrenables en el baseline\n \"\"\"\n return len([l for l in self.baseline.layers if l.trainable])\n\n def get_trainable_params(self) -> int:\n \"\"\"\n Función para recuperar el número de parámetros entrenados en el baseline\n \"\"\"\n return int(np.sum([\n count_params(p) for lay in self.baseline.layers for p in lay.trainable_weights if lay.trainable\n ]))\n\n def get_learning_rate(self) -> float:\n \"\"\"\n Función para recuperar el learning rate del modelo\n \"\"\"\n return eval(self.model.optimizer.lr)\n\n def freeze_layers(self):\n \"\"\"\n Función para congelar todas las capas de un modelo\n \"\"\"\n # Se iteran las capas del modelo para congelarlas. En caso de que una capa sea un modelo, se iterarán sus capas\n # para congelarlas.\n for i in self.model.layers:\n i.trainable = False\n if isinstance(i, Model):\n for layer in i.layers:\n layer.trainable = False\n\n\nclass VGG16Model(GeneralModel):\n \"\"\"\n Arquitectura VGG16 en la cual se definen las capas a congelar y los tamaños de size máximos a utilizar\n según el número de parámetros congelados.\n \"\"\"\n __name__ = 'VGG16'\n LAYERS_DICT = {\n '0FT': [],\n '1FT': ['block5_conv1', 'block5_conv2', 'block5_conv3'],\n '2FT': ['block4_conv1', 'block4_conv2', 'block4_conv3'],\n '3FT': ['block3_conv1', 'block3_conv2', 'block3_conv3'],\n '4FT': ['block2_conv1', 'block2_conv2']\n }\n BS_DICT = {\n '0FT': 96,\n '1FT': 88,\n '2FT': 80,\n '3FT': 72,\n '4FT': 54,\n 'ALL': 24\n }\n\n def __init__(self, n: int, weights: Union[str, io] = None, top_fc: str = 'simple'):\n super(VGG16Model, self).__init__(\n n=n, baseline=vgg16.VGG16(include_top=False, weights=weights, input_shape=self.shape), top_fc=top_fc,\n preprocess_func=vgg16.preprocess_input\n )\n\n @staticmethod\n def get_preprocessing_func() -> Callable:\n \"\"\"\n Función que retorna la función de 
preprocesado própia de VGG16\n :return: Función de preprocesado de la arquitectura de red\n \"\"\"\n return get_preprocessing('vgg16')\n\n\nclass ResNet50Model(GeneralModel):\n \"\"\"\n Arquitectura Resnet50 en la cual se definen las capas a congelar y los tamaños de size máximos a utilizar\n según el número de parámetros congelados.\n \"\"\"\n __name__ = 'ResNet50'\n LAYERS_DICT = {\n '0FT': [],\n '1FT': [\n 'conv5_block3_1_conv', 'conv5_block3_2_conv', 'conv5_block3_3_conv', 'conv5_block2_1_conv',\n 'conv5_block2_2_conv', 'conv5_block2_3_conv', 'conv5_block1_1_conv', 'conv5_block1_2_conv',\n 'conv5_block1_0_conv', 'conv5_block1_3_conv', 'conv5_block1_1_bn', 'conv5_block1_2_bn',\n 'conv5_block1_0_bn', 'conv5_block1_3_bn', 'conv5_block2_1_bn', 'conv5_block2_2_bn', 'conv5_block2_3_bn',\n 'conv5_block3_1_bn', 'conv5_block3_2_bn', 'conv5_block3_3_bn'\n ],\n '2FT': [\n 'conv4_block1_1_conv', 'conv4_block1_2_conv', 'conv4_block1_0_conv', 'conv4_block1_3_conv',\n 'conv4_block2_1_conv', 'conv4_block2_2_conv', 'conv4_block2_3_conv', 'conv4_block3_1_conv',\n 'conv4_block3_2_conv', 'conv4_block3_3_conv', 'conv4_block4_1_conv', 'conv4_block4_2_conv',\n 'conv4_block4_3_conv', 'conv4_block5_1_conv', 'conv4_block5_2_conv', 'conv4_block5_3_conv',\n 'conv4_block6_1_conv', 'conv4_block6_2_conv', 'conv4_block6_3_conv', 'conv4_block1_1_bn',\n 'conv4_block1_2_bn', 'conv4_block1_0_bn', 'conv4_block1_3_bn', 'conv4_block2_1_bn', 'conv4_block2_2_bn',\n 'conv4_block2_3_bn', 'conv4_block3_1_bn', 'conv4_block3_2_bn', 'conv4_block3_3_bn', 'conv4_block4_1_bn',\n 'conv4_block4_2_bn', 'conv4_block4_3_bn', 'conv4_block5_1_bn', 'conv4_block5_2_bn', 'conv4_block5_3_bn',\n 'conv4_block6_1_bn', 'conv4_block6_2_bn', 'conv4_block6_3_bn'\n ],\n '3FT': [\n 'conv3_block1_1_conv', 'conv3_block1_2_conv', 'conv3_block1_0_conv', 'conv3_block1_3_conv',\n 'conv3_block2_1_conv', 'conv3_block2_2_conv', 'conv3_block2_3_conv', 'conv3_block3_1_conv',\n 'conv3_block3_2_conv', 'conv3_block3_3_conv', 
'conv3_block4_1_conv', 'conv3_block4_2_conv',\n 'conv3_block4_3_conv', 'conv3_block1_1_bn', 'conv3_block1_2_bn', 'conv3_block1_0_bn', 'conv3_block1_3_bn',\n 'conv3_block2_1_bn', 'conv3_block2_2_bn', 'conv3_block2_3_bn', 'conv3_block3_1_bn', 'conv3_block3_2_bn',\n 'conv3_block3_3_bn', 'conv3_block4_1_bn', 'conv3_block4_2_bn', 'conv3_block4_3_bn'\n ],\n '4FT': [\n 'conv2_block1_1_conv', 'conv2_block1_2_conv', 'conv2_block1_0_conv', 'conv2_block1_3_conv',\n 'conv2_block2_1_conv', 'conv2_block2_2_conv', 'conv2_block2_3_conv', 'conv2_block3_1_conv',\n 'conv2_block3_2_conv', 'conv2_block3_3_conv', 'conv2_block1_1_bn', 'conv2_block1_2_bn', 'conv2_block1_0_bn',\n 'conv2_block1_3_bn', 'conv2_block2_1_bn', 'conv2_block2_2_bn', 'conv2_block2_3_bn', 'conv2_block3_1_bn',\n 'conv2_block3_2_bn', 'conv2_block3_3_bn'\n ]\n }\n BS_DICT = {\n '0FT': 128,\n '1FT': 128,\n '2FT': 112,\n '3FT': 52,\n '4FT': 28,\n 'ALL': 22\n }\n shape = (224, 224, 3)\n\n def __init__(self, n: int, weights: Union[str, io] = None, top_fc: str = 'simple'):\n super(ResNet50Model, self).__init__(\n n=n, baseline=resnet50.ResNet50(include_top=False, weights=weights, input_shape=self.shape), top_fc=top_fc,\n preprocess_func=resnet50.preprocess_input\n )\n\n @staticmethod\n def get_preprocessing_func() -> Callable:\n \"\"\"\n Función que retorna la función de preprocesado própia de resnet50\n :return: Función de preprocesado de la arquitectura de red\n \"\"\"\n return get_preprocessing('resnet50')\n\n\nclass InceptionV3Model(GeneralModel):\n \"\"\"\n Arquitectura InceptionV3 en la cual se definen las capas a congelar y los tamaños de size máximos a utilizar\n según el número de parámetros congelados.\n \"\"\"\n __name__ = 'InceptionV3'\n LAYERS_DICT = {\n '0FT': [],\n '1FT': [\n 'conv2d_89', 'conv2d_90', 'conv2d_91', 'conv2d_92', 'conv2d_86', 'conv2d_87', 'conv2d_88', 'conv2d_93',\n 'conv2d_85', 'batch_normalization_89', 'batch_normalization_90', 'batch_normalization_91',\n 'batch_normalization_92', 
'batch_normalization_86', 'batch_normalization_87', 'batch_normalization_88',\n 'batch_normalization_93', 'batch_normalization_85'\n ],\n '2FT': [\n 'conv2d_80', 'conv2d_81', 'conv2d_82', 'conv2d_83', 'conv2d_77', 'conv2d_78', 'conv2d_79', 'conv2d_84',\n 'conv2d_76', 'batch_normalization_80', 'batch_normalization_81', 'batch_normalization_82',\n 'batch_normalization_83', 'batch_normalization_77', 'batch_normalization_78', 'batch_normalization_79',\n 'batch_normalization_84', 'batch_normalization_76',\n ],\n '3FT': [\n 'conv2d_72', 'conv2d_73', 'conv2d_74', 'conv2d_75', 'conv2d_70', 'conv2d_71', 'batch_normalization_72',\n 'batch_normalization_73', 'batch_normalization_74', 'batch_normalization_75', 'batch_normalization_70',\n 'batch_normalization_71'\n ],\n '4FT': [\n 'conv2d_64', 'conv2d_65', 'conv2d_66', 'conv2d_67', 'conv2d_68', 'conv2d_61', 'conv2d_62', 'conv2d_63',\n 'conv2d_69', 'conv2d_60', 'batch_normalization_64', 'batch_normalization_65', 'batch_normalization_66',\n 'batch_normalization_67', 'batch_normalization_68', 'batch_normalization_61', 'batch_normalization_62',\n 'batch_normalization_63', 'batch_normalization_69', 'batch_normalization_60',\n ]\n }\n BS_DICT = {\n '0FT': 128,\n '1FT': 128,\n '2FT': 128,\n '3FT': 128,\n '4FT': 128,\n 'ALL': 22\n }\n shape = (299, 299, 3)\n\n def __init__(self, n: int, weights: Union[str, io] = None, top_fc: str = 'simple'):\n super(InceptionV3Model, self).__init__(\n n=n, baseline=inception_v3.InceptionV3(include_top=False, weights=weights, input_shape=self.shape),\n preprocess_func=inception_v3.preprocess_input, top_fc=top_fc\n )\n\n @staticmethod\n def get_preprocessing_func() -> Callable:\n \"\"\"\n Función que retorna la función de preprocesado própia de InceptionV3\n :return: Función de preprocesado de la arquitectura de red\n \"\"\"\n return get_preprocessing('inceptionv3')\n\n\nclass DenseNetModel(GeneralModel):\n \"\"\"\n Arquitectura Densenet121 en la cual se definen las capas a congelar y los tamaños de 
size máximos a utilizar\n según el número de parámetros congelados.\n \"\"\"\n __name__ = 'DenseNet'\n LAYERS_DICT = {\n '0FT': [],\n '1FT': [\n 'conv5_block1_1_conv', 'conv5_block1_2_conv', 'conv5_block2_1_conv', 'conv5_block2_2_conv',\n 'conv5_block3_1_conv', 'conv5_block3_2_conv', 'conv5_block4_1_conv', 'conv5_block4_2_conv',\n 'conv5_block5_1_conv', 'conv5_block5_2_conv', 'conv5_block6_1_conv', 'conv5_block6_2_conv',\n 'conv5_block7_1_conv', 'conv5_block7_2_conv', 'conv5_block8_1_conv', 'conv5_block8_2_conv',\n 'conv5_block9_1_conv', 'conv5_block9_2_conv', 'conv5_block10_1_conv', 'conv5_block10_2_conv',\n 'conv5_block11_1_conv', 'conv5_block11_2_conv', 'conv5_block12_1_conv', 'conv5_block12_2_conv',\n 'conv5_block13_1_conv', 'conv5_block13_2_conv', 'conv5_block14_1_conv', 'conv5_block14_2_conv',\n 'conv5_block15_1_conv', 'conv5_block15_2_conv', 'conv5_block16_1_conv', 'conv5_block16_2_conv',\n 'conv5_block1_0_bn', 'conv5_block1_1_bn', 'conv5_block2_0_bn', 'conv5_block2_1_bn', 'conv5_block3_0_bn',\n 'conv5_block3_1_bn', 'conv5_block4_0_bn', 'conv5_block4_1_bn', 'conv5_block5_0_bn', 'conv5_block5_1_bn',\n 'conv5_block6_0_bn', 'conv5_block6_1_bn', 'conv5_block7_0_bn', 'conv5_block7_1_bn', 'conv5_block8_0_bn',\n 'conv5_block8_1_bn', 'conv5_block9_0_bn', 'conv5_block9_1_bn', 'conv5_block10_0_bn', 'conv5_block10_1_bn',\n 'conv5_block11_0_bn', 'conv5_block11_1_bn', 'conv5_block12_0_bn', 'conv5_block12_1_bn',\n 'conv5_block13_0_bn', 'conv5_block13_1_bn', 'conv5_block14_0_bn', 'conv5_block14_1_bn',\n 'conv5_block15_0_bn', 'conv5_block15_1_bn', 'conv5_block16_0_bn', 'conv5_block16_1_bn', 'bn'\n ],\n '2FT': [\n 'conv4_block1_1_conv', 'conv4_block1_2_conv', 'conv4_block2_1_conv', 'conv4_block2_2_conv',\n 'conv4_block3_1_conv', 'conv4_block3_2_conv', 'conv4_block4_1_conv', 'conv4_block4_2_conv',\n 'conv4_block5_1_conv', 'conv4_block5_2_conv', 'conv4_block6_1_conv', 'conv4_block6_2_conv',\n 'conv4_block7_1_conv', 'conv4_block7_2_conv', 'conv4_block8_1_conv', 
'conv4_block8_2_conv',\n 'conv4_block9_1_conv', 'conv4_block9_2_conv', 'conv4_block10_1_conv', 'conv4_block10_2_conv',\n 'conv4_block11_1_conv', 'conv4_block11_2_conv', 'conv4_block12_1_conv', 'conv4_block12_2_conv',\n 'conv4_block13_1_conv', 'conv4_block13_2_conv', 'conv4_block14_1_conv', 'conv4_block14_2_conv',\n 'conv4_block15_1_conv', 'conv4_block15_2_conv', 'conv4_block16_1_conv', 'conv4_block16_2_conv',\n 'conv4_block17_1_conv', 'conv4_block17_2_conv', 'conv4_block18_1_conv', 'conv4_block18_2_conv',\n 'conv4_block19_1_conv', 'conv4_block19_2_conv', 'conv4_block20_1_conv', 'conv4_block20_2_conv',\n 'conv4_block21_1_conv', 'conv4_block21_2_conv', 'conv4_block22_1_conv', 'conv4_block22_2_conv',\n 'conv4_block23_1_conv', 'conv4_block23_2_conv', 'conv4_block24_1_conv', 'conv4_block24_2_conv',\n 'conv4_block1_0_bn', 'conv4_block1_1_bn', 'conv4_block2_0_bn', 'conv4_block2_1_bn', 'conv4_block3_0_bn',\n 'conv4_block3_1_bn', 'conv4_block4_0_bn', 'conv4_block4_1_bn', 'conv4_block5_0_bn', 'conv4_block5_1_bn',\n 'conv4_block6_0_bn', 'conv4_block6_1_bn', 'conv4_block7_0_bn', 'conv4_block7_1_bn', 'conv4_block8_0_bn',\n 'conv4_block8_1_bn', 'conv4_block9_0_bn', 'conv4_block9_1_bn', 'conv4_block10_0_bn', 'conv4_block10_1_bn',\n 'conv4_block11_0_bn', 'conv4_block11_1_bn', 'conv4_block12_0_bn', 'conv4_block12_1_bn',\n 'conv4_block13_0_bn', 'conv4_block13_1_bn', 'conv4_block14_0_bn', 'conv4_block14_1_bn',\n 'conv4_block15_0_bn', 'conv4_block15_1_bn', 'conv4_block16_0_bn', 'conv4_block16_1_bn',\n 'conv4_block17_0_bn', 'conv4_block17_1_bn', 'conv4_block18_0_bn', 'conv4_block18_1_bn',\n 'conv4_block19_0_bn', 'conv4_block19_1_bn', 'conv4_block20_0_bn', 'conv4_block20_1_bn',\n 'conv4_block21_0_bn', 'conv4_block21_1_bn', 'conv4_block22_0_bn', 'conv4_block22_1_bn',\n 'conv4_block23_0_bn', 'conv4_block23_1_bn', 'conv4_block24_0_bn', 'conv4_block24_1_bn',\n 'pool2_bn', 'pool2_conv'\n ],\n '3FT': [\n 'conv3_block1_1_conv', 'conv3_block1_2_conv', 'conv3_block2_1_conv', 
'conv3_block2_2_conv',\n 'conv3_block3_1_conv', 'conv3_block3_2_conv', 'conv3_block4_1_conv', 'conv3_block4_2_conv',\n 'conv3_block5_1_conv', 'conv3_block5_2_conv', 'conv3_block6_1_conv', 'conv3_block6_2_conv',\n 'conv3_block7_1_conv', 'conv3_block7_2_conv', 'conv3_block8_1_conv', 'conv3_block8_2_conv',\n 'conv3_block9_1_conv', 'conv3_block9_2_conv', 'conv3_block10_1_conv', 'conv3_block10_2_conv',\n 'conv3_block11_1_conv', 'conv3_block11_2_conv', 'conv3_block12_1_conv', 'conv3_block12_2_conv',\n 'conv3_block1_0_bn', 'conv3_block1_1_bn', 'conv3_block2_0_bn', 'conv3_block2_1_bn', 'conv3_block3_0_bn',\n 'conv3_block3_1_bn', 'conv3_block4_0_bn', 'conv3_block4_1_bn', 'conv3_block5_0_bn', 'conv3_block5_1_bn',\n 'conv3_block6_0_bn', 'conv3_block6_1_bn', 'conv3_block7_0_bn', 'conv3_block7_1_bn', 'conv3_block8_0_bn',\n 'conv3_block8_1_bn', 'conv3_block9_0_bn', 'conv3_block9_1_bn', 'conv3_block10_0_bn', 'conv3_block10_1_bn',\n 'conv3_block11_0_bn', 'conv3_block11_1_bn', 'conv3_block12_0_bn', 'conv3_block12_1_bn',\n 'pool3_bn', 'pool3_conv'\n ],\n '4FT': [\n 'conv2_block1_1_conv', 'conv2_block1_2_conv', 'conv2_block2_1_conv', 'conv2_block2_2_conv',\n 'conv2_block3_1_conv', 'conv2_block3_2_conv', 'conv2_block4_1_conv', 'conv2_block4_2_conv',\n 'conv2_block5_1_conv', 'conv2_block5_2_conv', 'conv2_block6_1_conv', 'conv2_block6_2_conv',\n 'conv2_block1_0_bn', 'conv2_block1_1_bn', 'conv2_block2_0_bn', 'conv2_block2_1_bn', 'conv2_block3_0_bn',\n 'conv2_block3_1_bn', 'conv2_block4_0_bn', 'conv2_block4_1_bn', 'conv2_block5_0_bn', 'conv2_block5_1_bn',\n 'conv2_block6_0_bn', 'conv2_block6_1_bn', 'pool4_bn', 'pool4_conv'\n ]\n }\n BS_DICT = {\n '0FT': 128,\n '1FT': 128,\n '2FT': 26,\n '3FT': 24,\n '4FT': 20,\n 'ALL': 18\n }\n\n def __init__(self, n: int, weights: Union[str, io] = None, top_fc: str = 'simple'):\n super(DenseNetModel, self).__init__(\n n=n, baseline=densenet.DenseNet121(include_top=False, weights=weights, input_shape=self.shape),\n 
preprocess_func=densenet.preprocess_input, top_fc=top_fc\n )\n\n @staticmethod\n def get_preprocessing_func() -> Callable:\n \"\"\"\n Función que retorna la función de preprocesado própia de densenet121\n :return: Función de preprocesado de la arquitectura de red\n \"\"\"\n return get_preprocessing('densenet121')\n"
] | [
[
"tensorflow.keras.applications.resnet50.ResNet50",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.constraints.max_norm",
"tensorflow.keras.models.Model",
"tensorflow.keras.backend.count_params",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.regularizers.L2",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Sequential",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.backend.eval",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.applications.densenet.DenseNet121",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.applications.inception_v3.InceptionV3",
"tensorflow.keras.layers.Input"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.3",
"2.4",
"2.5",
"2.6"
]
}
] |
rj-renan/renan--python-microservice | [
"61b93785dbed8192d4c79801dbf770cda6e5770f"
] | [
"server/services/math_services.py"
] | [
"import numpy as np\n\n\ndef csv_to_list(csv_string):\n \"\"\"\n Converts a string with comma-separated integer values to a Python list of integers.\n Receives\n --------\n csv_string : string\n Comma-separated integer values.\n Returns\n -------\n integer_list : list\n List of integer values.\n \"\"\"\n\n string_list = csv_string.split(',')\n integer_list = list(map(int, string_list))\n return integer_list\n\n\ndef list_to_array(integer_list):\n \"\"\"\n Converts a list of integer values to an integer NumPy array.\n Receives\n --------\n integer_list : list\n List of integer values.\n Returns\n -------\n integer_array : numpy.array\n Numpy array of integer values.\n \"\"\"\n\n integer_array = np.array(integer_list, dtype=int)\n return integer_array\n\n\ndef calculate_mean(integer_list):\n \"\"\"\n Calculates the mean of a list of integer values.\n Receives\n --------\n integer_list : list\n List of integer values.\n Returns\n -------\n mean : double\n Mean value of the list.\n \"\"\"\n\n my_array = list_to_array(integer_list)\n mean = my_array.mean()\nreturn mean"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
staaker/sympy | [
"946be880abad6186c74eb3a10d608bebbcaf0851"
] | [
"sympy/matrices/tests/test_matrices.py"
] | [
"import random\n\nfrom sympy import (\n Abs, Add, E, Float, I, Integer, Max, Min, N, Poly, Pow, PurePoly, Rational,\n S, Symbol, cos, exp, expand_mul, oo, pi, signsimp, simplify, sin, sqrt, symbols,\n sympify, trigsimp, tan, sstr, diff, Function)\nfrom sympy.matrices.matrices import (ShapeError, MatrixError,\n NonSquareMatrixError, DeferredVector, _find_reasonable_pivot_naive,\n _simplify)\nfrom sympy.matrices import (\n GramSchmidt, ImmutableMatrix, ImmutableSparseMatrix, Matrix,\n SparseMatrix, casoratian, diag, eye, hessian,\n matrix_multiply_elementwise, ones, randMatrix, rot_axis1, rot_axis2,\n rot_axis3, wronskian, zeros, MutableDenseMatrix, ImmutableDenseMatrix, MatrixSymbol)\nfrom sympy.core.compatibility import long, iterable, range, Hashable\nfrom sympy.core import Tuple\nfrom sympy.utilities.iterables import flatten, capture\nfrom sympy.utilities.pytest import raises, XFAIL, slow, skip, warns_deprecated_sympy\nfrom sympy.solvers import solve\nfrom sympy.assumptions import Q\nfrom sympy.tensor.array import Array\nfrom sympy.matrices.expressions import MatPow\n\nfrom sympy.abc import a, b, c, d, x, y, z, t\n\n# don't re-order this list\nclasses = (Matrix, SparseMatrix, ImmutableMatrix, ImmutableSparseMatrix)\n\n\ndef test_args():\n for c, cls in enumerate(classes):\n m = cls.zeros(3, 2)\n # all should give back the same type of arguments, e.g. 
ints for shape\n assert m.shape == (3, 2) and all(type(i) is int for i in m.shape)\n assert m.rows == 3 and type(m.rows) is int\n assert m.cols == 2 and type(m.cols) is int\n if not c % 2:\n assert type(m._mat) in (list, tuple, Tuple)\n else:\n assert type(m._smat) is dict\n\n\ndef test_division():\n v = Matrix(1, 2, [x, y])\n assert v.__div__(z) == Matrix(1, 2, [x/z, y/z])\n assert v.__truediv__(z) == Matrix(1, 2, [x/z, y/z])\n assert v/z == Matrix(1, 2, [x/z, y/z])\n\n\ndef test_sum():\n m = Matrix([[1, 2, 3], [x, y, x], [2*y, -50, z*x]])\n assert m + m == Matrix([[2, 4, 6], [2*x, 2*y, 2*x], [4*y, -100, 2*z*x]])\n n = Matrix(1, 2, [1, 2])\n raises(ShapeError, lambda: m + n)\n\ndef test_abs():\n m = Matrix(1, 2, [-3, x])\n n = Matrix(1, 2, [3, Abs(x)])\n assert abs(m) == n\n\ndef test_addition():\n a = Matrix((\n (1, 2),\n (3, 1),\n ))\n\n b = Matrix((\n (1, 2),\n (3, 0),\n ))\n\n assert a + b == a.add(b) == Matrix([[2, 4], [6, 1]])\n\n\ndef test_fancy_index_matrix():\n for M in (Matrix, SparseMatrix):\n a = M(3, 3, range(9))\n assert a == a[:, :]\n assert a[1, :] == Matrix(1, 3, [3, 4, 5])\n assert a[:, 1] == Matrix([1, 4, 7])\n assert a[[0, 1], :] == Matrix([[0, 1, 2], [3, 4, 5]])\n assert a[[0, 1], 2] == a[[0, 1], [2]]\n assert a[2, [0, 1]] == a[[2], [0, 1]]\n assert a[:, [0, 1]] == Matrix([[0, 1], [3, 4], [6, 7]])\n assert a[0, 0] == 0\n assert a[0:2, :] == Matrix([[0, 1, 2], [3, 4, 5]])\n assert a[:, 0:2] == Matrix([[0, 1], [3, 4], [6, 7]])\n assert a[::2, 1] == a[[0, 2], 1]\n assert a[1, ::2] == a[1, [0, 2]]\n a = M(3, 3, range(9))\n assert a[[0, 2, 1, 2, 1], :] == Matrix([\n [0, 1, 2],\n [6, 7, 8],\n [3, 4, 5],\n [6, 7, 8],\n [3, 4, 5]])\n assert a[:, [0,2,1,2,1]] == Matrix([\n [0, 2, 1, 2, 1],\n [3, 5, 4, 5, 4],\n [6, 8, 7, 8, 7]])\n\n a = SparseMatrix.zeros(3)\n a[1, 2] = 2\n a[0, 1] = 3\n a[2, 0] = 4\n assert a.extract([1, 1], [2]) == Matrix([\n [2],\n [2]])\n assert a.extract([1, 0], [2, 2, 2]) == Matrix([\n [2, 2, 2],\n [0, 0, 0]])\n assert 
a.extract([1, 0, 1, 2], [2, 0, 1, 0]) == Matrix([\n [2, 0, 0, 0],\n [0, 0, 3, 0],\n [2, 0, 0, 0],\n [0, 4, 0, 4]])\n\n\ndef test_multiplication():\n a = Matrix((\n (1, 2),\n (3, 1),\n (0, 6),\n ))\n\n b = Matrix((\n (1, 2),\n (3, 0),\n ))\n\n c = a*b\n assert c[0, 0] == 7\n assert c[0, 1] == 2\n assert c[1, 0] == 6\n assert c[1, 1] == 6\n assert c[2, 0] == 18\n assert c[2, 1] == 0\n\n try:\n eval('c = a @ b')\n except SyntaxError:\n pass\n else:\n assert c[0, 0] == 7\n assert c[0, 1] == 2\n assert c[1, 0] == 6\n assert c[1, 1] == 6\n assert c[2, 0] == 18\n assert c[2, 1] == 0\n\n h = matrix_multiply_elementwise(a, c)\n assert h == a.multiply_elementwise(c)\n assert h[0, 0] == 7\n assert h[0, 1] == 4\n assert h[1, 0] == 18\n assert h[1, 1] == 6\n assert h[2, 0] == 0\n assert h[2, 1] == 0\n raises(ShapeError, lambda: matrix_multiply_elementwise(a, b))\n\n c = b * Symbol(\"x\")\n assert isinstance(c, Matrix)\n assert c[0, 0] == x\n assert c[0, 1] == 2*x\n assert c[1, 0] == 3*x\n assert c[1, 1] == 0\n\n c2 = x * b\n assert c == c2\n\n c = 5 * b\n assert isinstance(c, Matrix)\n assert c[0, 0] == 5\n assert c[0, 1] == 2*5\n assert c[1, 0] == 3*5\n assert c[1, 1] == 0\n\n try:\n eval('c = 5 @ b')\n except SyntaxError:\n pass\n else:\n assert isinstance(c, Matrix)\n assert c[0, 0] == 5\n assert c[0, 1] == 2*5\n assert c[1, 0] == 3*5\n assert c[1, 1] == 0\n\n\ndef test_power():\n raises(NonSquareMatrixError, lambda: Matrix((1, 2))**2)\n\n R = Rational\n A = Matrix([[2, 3], [4, 5]])\n assert (A**-3)[:] == [R(-269)/8, R(153)/8, R(51)/2, R(-29)/2]\n assert (A**5)[:] == [6140, 8097, 10796, 14237]\n A = Matrix([[2, 1, 3], [4, 2, 4], [6, 12, 1]])\n assert (A**3)[:] == [290, 262, 251, 448, 440, 368, 702, 954, 433]\n assert A**0 == eye(3)\n assert A**1 == A\n assert (Matrix([[2]]) ** 100)[0, 0] == 2**100\n assert eye(2)**10000000 == eye(2)\n assert Matrix([[1, 2], [3, 4]])**Integer(2) == Matrix([[7, 10], [15, 22]])\n\n A = Matrix([[33, 24], [48, 57]])\n assert (A**(S(1)/2))[:] == 
[5, 2, 4, 7]\n A = Matrix([[0, 4], [-1, 5]])\n assert (A**(S(1)/2))**2 == A\n\n assert Matrix([[1, 0], [1, 1]])**(S(1)/2) == Matrix([[1, 0], [S.Half, 1]])\n assert Matrix([[1, 0], [1, 1]])**0.5 == Matrix([[1.0, 0], [0.5, 1.0]])\n from sympy.abc import a, b, n\n assert Matrix([[1, a], [0, 1]])**n == Matrix([[1, a*n], [0, 1]])\n assert Matrix([[b, a], [0, b]])**n == Matrix([[b**n, a*b**(n-1)*n], [0, b**n]])\n assert Matrix([[a, 1, 0], [0, a, 1], [0, 0, a]])**n == Matrix([\n [a**n, a**(n-1)*n, a**(n-2)*(n-1)*n/2],\n [0, a**n, a**(n-1)*n],\n [0, 0, a**n]])\n assert Matrix([[a, 1, 0], [0, a, 0], [0, 0, b]])**n == Matrix([\n [a**n, a**(n-1)*n, 0],\n [0, a**n, 0],\n [0, 0, b**n]])\n\n A = Matrix([[1, 0], [1, 7]])\n assert A._matrix_pow_by_jordan_blocks(3) == A._eval_pow_by_recursion(3)\n A = Matrix([[2]])\n assert A**10 == Matrix([[2**10]]) == A._matrix_pow_by_jordan_blocks(10) == \\\n A._eval_pow_by_recursion(10)\n\n # testing a matrix that cannot be jordan blocked issue 11766\n m = Matrix([[3, 0, 0, 0, -3], [0, -3, -3, 0, 3], [0, 3, 0, 3, 0], [0, 0, 3, 0, 3], [3, 0, 0, 3, 0]])\n raises(MatrixError, lambda: m._matrix_pow_by_jordan_blocks(10))\n\n # test issue 11964\n raises(ValueError, lambda: Matrix([[1, 1], [3, 3]])._matrix_pow_by_jordan_blocks(-10))\n A = Matrix([[0, 1, 0], [0, 0, 1], [0, 0, 0]]) # Nilpotent jordan block size 3\n assert A**10.0 == Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n raises(ValueError, lambda: A**2.1)\n raises(ValueError, lambda: A**(S(3)/2))\n A = Matrix([[8, 1], [3, 2]])\n assert A**10.0 == Matrix([[1760744107, 272388050], [817164150, 126415807]])\n A = Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]]) # Nilpotent jordan block size 1\n assert A**10.2 == Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n A = Matrix([[0, 1, 0], [0, 0, 1], [0, 0, 1]]) # Nilpotent jordan block size 2\n assert A**10.0 == Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n n = Symbol('n', integer=True)\n assert isinstance(A**n, MatPow)\n n = Symbol('n', integer=True, nonnegative=True)\n 
raises(ValueError, lambda: A**n)\n assert A**(n + 2) == Matrix([[0, 0, 1], [0, 0, 1], [0, 0, 1]])\n raises(ValueError, lambda: A**(S(3)/2))\n A = Matrix([[0, 0, 1], [3, 0, 1], [4, 3, 1]])\n assert A**5.0 == Matrix([[168, 72, 89], [291, 144, 161], [572, 267, 329]])\n assert A**5.0 == A**5\n A = Matrix([[0, 1, 0],[-1, 0, 0],[0, 0, 0]])\n n = Symbol(\"n\")\n An = A**n\n assert An.subs(n, 2).doit() == A**2\n raises(ValueError, lambda: An.subs(n, -2).doit())\n assert An * An == A**(2*n)\n\n\ndef test_creation():\n raises(ValueError, lambda: Matrix(5, 5, range(20)))\n raises(ValueError, lambda: Matrix(5, -1, []))\n raises(IndexError, lambda: Matrix((1, 2))[2])\n with raises(IndexError):\n Matrix((1, 2))[1:2] = 5\n with raises(IndexError):\n Matrix((1, 2))[3] = 5\n\n assert Matrix() == Matrix([]) == Matrix([[]]) == Matrix(0, 0, [])\n\n a = Matrix([[x, 0], [0, 0]])\n m = a\n assert m.cols == m.rows\n assert m.cols == 2\n assert m[:] == [x, 0, 0, 0]\n\n b = Matrix(2, 2, [x, 0, 0, 0])\n m = b\n assert m.cols == m.rows\n assert m.cols == 2\n assert m[:] == [x, 0, 0, 0]\n\n assert a == b\n\n assert Matrix(b) == b\n\n c = Matrix((\n Matrix((\n (1, 2, 3),\n (4, 5, 6)\n )),\n (7, 8, 9)\n ))\n assert c.cols == 3\n assert c.rows == 3\n assert c[:] == [1, 2, 3, 4, 5, 6, 7, 8, 9]\n\n assert Matrix(eye(2)) == eye(2)\n assert ImmutableMatrix(ImmutableMatrix(eye(2))) == ImmutableMatrix(eye(2))\n assert ImmutableMatrix(c) == c.as_immutable()\n assert Matrix(ImmutableMatrix(c)) == ImmutableMatrix(c).as_mutable()\n\n assert c is not Matrix(c)\n\n\ndef test_tolist():\n lst = [[S.One, S.Half, x*y, S.Zero], [x, y, z, x**2], [y, -S.One, z*x, 3]]\n m = Matrix(lst)\n assert m.tolist() == lst\n\n\ndef test_as_mutable():\n assert zeros(0, 3).as_mutable() == zeros(0, 3)\n assert zeros(0, 3).as_immutable() == ImmutableMatrix(zeros(0, 3))\n assert zeros(3, 0).as_immutable() == ImmutableMatrix(zeros(3, 0))\n\n\ndef test_determinant():\n\n for M in [Matrix(), Matrix([[1]])]:\n assert (\n M.det() ==\n 
M._eval_det_bareiss() ==\n M._eval_det_berkowitz() ==\n M._eval_det_lu() ==\n 1)\n\n M = Matrix(( (-3, 2),\n ( 8, -5) ))\n\n assert M.det(method=\"bareiss\") == -1\n assert M.det(method=\"berkowitz\") == -1\n assert M.det(method=\"lu\") == -1\n\n M = Matrix(( (x, 1),\n (y, 2*y) ))\n\n assert M.det(method=\"bareiss\") == 2*x*y - y\n assert M.det(method=\"berkowitz\") == 2*x*y - y\n assert M.det(method=\"lu\") == 2*x*y - y\n\n M = Matrix(( (1, 1, 1),\n (1, 2, 3),\n (1, 3, 6) ))\n\n assert M.det(method=\"bareiss\") == 1\n assert M.det(method=\"berkowitz\") == 1\n assert M.det(method=\"lu\") == 1\n\n M = Matrix(( ( 3, -2, 0, 5),\n (-2, 1, -2, 2),\n ( 0, -2, 5, 0),\n ( 5, 0, 3, 4) ))\n\n assert M.det(method=\"bareiss\") == -289\n assert M.det(method=\"berkowitz\") == -289\n assert M.det(method=\"lu\") == -289\n\n M = Matrix(( ( 1, 2, 3, 4),\n ( 5, 6, 7, 8),\n ( 9, 10, 11, 12),\n (13, 14, 15, 16) ))\n\n assert M.det(method=\"bareiss\") == 0\n assert M.det(method=\"berkowitz\") == 0\n assert M.det(method=\"lu\") == 0\n\n M = Matrix(( (3, 2, 0, 0, 0),\n (0, 3, 2, 0, 0),\n (0, 0, 3, 2, 0),\n (0, 0, 0, 3, 2),\n (2, 0, 0, 0, 3) ))\n\n assert M.det(method=\"bareiss\") == 275\n assert M.det(method=\"berkowitz\") == 275\n assert M.det(method=\"lu\") == 275\n\n M = Matrix(( (1, 0, 1, 2, 12),\n (2, 0, 1, 1, 4),\n (2, 1, 1, -1, 3),\n (3, 2, -1, 1, 8),\n (1, 1, 1, 0, 6) ))\n\n assert M.det(method=\"bareiss\") == -55\n assert M.det(method=\"berkowitz\") == -55\n assert M.det(method=\"lu\") == -55\n\n M = Matrix(( (-5, 2, 3, 4, 5),\n ( 1, -4, 3, 4, 5),\n ( 1, 2, -3, 4, 5),\n ( 1, 2, 3, -2, 5),\n ( 1, 2, 3, 4, -1) ))\n\n assert M.det(method=\"bareiss\") == 11664\n assert M.det(method=\"berkowitz\") == 11664\n assert M.det(method=\"lu\") == 11664\n\n M = Matrix(( ( 2, 7, -1, 3, 2),\n ( 0, 0, 1, 0, 1),\n (-2, 0, 7, 0, 2),\n (-3, -2, 4, 5, 3),\n ( 1, 0, 0, 0, 1) ))\n\n assert M.det(method=\"bareiss\") == 123\n assert M.det(method=\"berkowitz\") == 123\n assert M.det(method=\"lu\") == 
123\n\n M = Matrix(( (x, y, z),\n (1, 0, 0),\n (y, z, x) ))\n\n assert M.det(method=\"bareiss\") == z**2 - x*y\n assert M.det(method=\"berkowitz\") == z**2 - x*y\n assert M.det(method=\"lu\") == z**2 - x*y\n\n # issue 13835\n a = symbols('a')\n M = lambda n: Matrix([[i + a*j for i in range(n)]\n for j in range(n)])\n assert M(5).det() == 0\n assert M(6).det() == 0\n assert M(7).det() == 0\n\n\ndef test_slicing():\n m0 = eye(4)\n assert m0[:3, :3] == eye(3)\n assert m0[2:4, 0:2] == zeros(2)\n\n m1 = Matrix(3, 3, lambda i, j: i + j)\n assert m1[0, :] == Matrix(1, 3, (0, 1, 2))\n assert m1[1:3, 1] == Matrix(2, 1, (2, 3))\n\n m2 = Matrix([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])\n assert m2[:, -1] == Matrix(4, 1, [3, 7, 11, 15])\n assert m2[-2:, :] == Matrix([[8, 9, 10, 11], [12, 13, 14, 15]])\n\n\ndef test_submatrix_assignment():\n m = zeros(4)\n m[2:4, 2:4] = eye(2)\n assert m == Matrix(((0, 0, 0, 0),\n (0, 0, 0, 0),\n (0, 0, 1, 0),\n (0, 0, 0, 1)))\n m[:2, :2] = eye(2)\n assert m == eye(4)\n m[:, 0] = Matrix(4, 1, (1, 2, 3, 4))\n assert m == Matrix(((1, 0, 0, 0),\n (2, 1, 0, 0),\n (3, 0, 1, 0),\n (4, 0, 0, 1)))\n m[:, :] = zeros(4)\n assert m == zeros(4)\n m[:, :] = [(1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16)]\n assert m == Matrix(((1, 2, 3, 4),\n (5, 6, 7, 8),\n (9, 10, 11, 12),\n (13, 14, 15, 16)))\n m[:2, 0] = [0, 0]\n assert m == Matrix(((0, 2, 3, 4),\n (0, 6, 7, 8),\n (9, 10, 11, 12),\n (13, 14, 15, 16)))\n\n\ndef test_extract():\n m = Matrix(4, 3, lambda i, j: i*3 + j)\n assert m.extract([0, 1, 3], [0, 1]) == Matrix(3, 2, [0, 1, 3, 4, 9, 10])\n assert m.extract([0, 3], [0, 0, 2]) == Matrix(2, 3, [0, 0, 2, 9, 9, 11])\n assert m.extract(range(4), range(3)) == m\n raises(IndexError, lambda: m.extract([4], [0]))\n raises(IndexError, lambda: m.extract([0], [3]))\n\n\ndef test_reshape():\n m0 = eye(3)\n assert m0.reshape(1, 9) == Matrix(1, 9, (1, 0, 0, 0, 1, 0, 0, 0, 1))\n m1 = Matrix(3, 4, lambda i, j: i + j)\n assert 
m1.reshape(\n 4, 3) == Matrix(((0, 1, 2), (3, 1, 2), (3, 4, 2), (3, 4, 5)))\n assert m1.reshape(2, 6) == Matrix(((0, 1, 2, 3, 1, 2), (3, 4, 2, 3, 4, 5)))\n\n\ndef test_applyfunc():\n m0 = eye(3)\n assert m0.applyfunc(lambda x: 2*x) == eye(3)*2\n assert m0.applyfunc(lambda x: 0) == zeros(3)\n\n\ndef test_expand():\n m0 = Matrix([[x*(x + y), 2], [((x + y)*y)*x, x*(y + x*(x + y))]])\n # Test if expand() returns a matrix\n m1 = m0.expand()\n assert m1 == Matrix(\n [[x*y + x**2, 2], [x*y**2 + y*x**2, x*y + y*x**2 + x**3]])\n\n a = Symbol('a', real=True)\n\n assert Matrix([exp(I*a)]).expand(complex=True) == \\\n Matrix([cos(a) + I*sin(a)])\n\n assert Matrix([[0, 1, 2], [0, 0, -1], [0, 0, 0]]).exp() == Matrix([\n [1, 1, Rational(3, 2)],\n [0, 1, -1],\n [0, 0, 1]]\n )\n\ndef test_refine():\n m0 = Matrix([[Abs(x)**2, sqrt(x**2)],\n [sqrt(x**2)*Abs(y)**2, sqrt(y**2)*Abs(x)**2]])\n m1 = m0.refine(Q.real(x) & Q.real(y))\n assert m1 == Matrix([[x**2, Abs(x)], [y**2*Abs(x), x**2*Abs(y)]])\n\n m1 = m0.refine(Q.positive(x) & Q.positive(y))\n assert m1 == Matrix([[x**2, x], [x*y**2, x**2*y]])\n\n m1 = m0.refine(Q.negative(x) & Q.negative(y))\n assert m1 == Matrix([[x**2, -x], [-x*y**2, -x**2*y]])\n\ndef test_random():\n M = randMatrix(3, 3)\n M = randMatrix(3, 3, seed=3)\n assert M == randMatrix(3, 3, seed=3)\n\n M = randMatrix(3, 4, 0, 150)\n M = randMatrix(3, seed=4, symmetric=True)\n assert M == randMatrix(3, seed=4, symmetric=True)\n\n S = M.copy()\n S.simplify()\n assert S == M # doesn't fail when elements are Numbers, not int\n\n rng = random.Random(4)\n assert M == randMatrix(3, symmetric=True, prng=rng)\n\n # Ensure symmetry\n for size in (10, 11): # Test odd and even\n for percent in (100, 70, 30):\n M = randMatrix(size, symmetric=True, percent=percent, prng=rng)\n assert M == M.T\n\n M = randMatrix(10, min=1, percent=70)\n zero_count = 0\n for i in range(M.shape[0]):\n for j in range(M.shape[1]):\n if M[i, j] == 0:\n zero_count += 1\n assert zero_count == 30\n\n\ndef 
test_LUdecomp():\n testmat = Matrix([[0, 2, 5, 3],\n [3, 3, 7, 4],\n [8, 4, 0, 2],\n [-2, 6, 3, 4]])\n L, U, p = testmat.LUdecomposition()\n assert L.is_lower\n assert U.is_upper\n assert (L*U).permute_rows(p, 'backward') - testmat == zeros(4)\n\n testmat = Matrix([[6, -2, 7, 4],\n [0, 3, 6, 7],\n [1, -2, 7, 4],\n [-9, 2, 6, 3]])\n L, U, p = testmat.LUdecomposition()\n assert L.is_lower\n assert U.is_upper\n assert (L*U).permute_rows(p, 'backward') - testmat == zeros(4)\n\n # non-square\n testmat = Matrix([[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9],\n [10, 11, 12]])\n L, U, p = testmat.LUdecomposition(rankcheck=False)\n assert L.is_lower\n assert U.is_upper\n assert (L*U).permute_rows(p, 'backward') - testmat == zeros(4, 3)\n\n # square and singular\n testmat = Matrix([[1, 2, 3],\n [2, 4, 6],\n [4, 5, 6]])\n L, U, p = testmat.LUdecomposition(rankcheck=False)\n assert L.is_lower\n assert U.is_upper\n assert (L*U).permute_rows(p, 'backward') - testmat == zeros(3)\n\n M = Matrix(((1, x, 1), (2, y, 0), (y, 0, z)))\n L, U, p = M.LUdecomposition()\n assert L.is_lower\n assert U.is_upper\n assert (L*U).permute_rows(p, 'backward') - M == zeros(3)\n\n mL = Matrix((\n (1, 0, 0),\n (2, 3, 0),\n ))\n assert mL.is_lower is True\n assert mL.is_upper is False\n mU = Matrix((\n (1, 2, 3),\n (0, 4, 5),\n ))\n assert mU.is_lower is False\n assert mU.is_upper is True\n\n # test FF LUdecomp\n M = Matrix([[1, 3, 3],\n [3, 2, 6],\n [3, 2, 2]])\n P, L, Dee, U = M.LUdecompositionFF()\n assert P*M == L*Dee.inv()*U\n\n M = Matrix([[1, 2, 3, 4],\n [3, -1, 2, 3],\n [3, 1, 3, -2],\n [6, -1, 0, 2]])\n P, L, Dee, U = M.LUdecompositionFF()\n assert P*M == L*Dee.inv()*U\n\n M = Matrix([[0, 0, 1],\n [2, 3, 0],\n [3, 1, 4]])\n P, L, Dee, U = M.LUdecompositionFF()\n assert P*M == L*Dee.inv()*U\n\n # issue 15794\n M = Matrix(\n [[1, 2, 3],\n [4, 5, 6],\n [7, 8, 9]]\n )\n raises(ValueError, lambda : M.LUdecomposition_Simple(rankcheck=True))\n\ndef test_LUsolve():\n A = Matrix([[2, 3, 5],\n [3, 6, 2],\n [8, 3, 
6]])\n x = Matrix(3, 1, [3, 7, 5])\n b = A*x\n soln = A.LUsolve(b)\n assert soln == x\n A = Matrix([[0, -1, 2],\n [5, 10, 7],\n [8, 3, 4]])\n x = Matrix(3, 1, [-1, 2, 5])\n b = A*x\n soln = A.LUsolve(b)\n assert soln == x\n A = Matrix([[2, 1], [1, 0], [1, 0]]) # issue 14548\n b = Matrix([3, 1, 1])\n assert A.LUsolve(b) == Matrix([1, 1])\n b = Matrix([3, 1, 2]) # inconsistent\n raises(ValueError, lambda: A.LUsolve(b))\n A = Matrix([[0, -1, 2],\n [5, 10, 7],\n [8, 3, 4],\n [2, 3, 5],\n [3, 6, 2],\n [8, 3, 6]])\n x = Matrix([2, 1, -4])\n b = A*x\n soln = A.LUsolve(b)\n assert soln == x\n A = Matrix([[0, -1, 2], [5, 10, 7]]) # underdetermined\n x = Matrix([-1, 2, 0])\n b = A*x\n raises(NotImplementedError, lambda: A.LUsolve(b))\n\n\ndef test_QRsolve():\n A = Matrix([[2, 3, 5],\n [3, 6, 2],\n [8, 3, 6]])\n x = Matrix(3, 1, [3, 7, 5])\n b = A*x\n soln = A.QRsolve(b)\n assert soln == x\n x = Matrix([[1, 2], [3, 4], [5, 6]])\n b = A*x\n soln = A.QRsolve(b)\n assert soln == x\n\n A = Matrix([[0, -1, 2],\n [5, 10, 7],\n [8, 3, 4]])\n x = Matrix(3, 1, [-1, 2, 5])\n b = A*x\n soln = A.QRsolve(b)\n assert soln == x\n x = Matrix([[7, 8], [9, 10], [11, 12]])\n b = A*x\n soln = A.QRsolve(b)\n assert soln == x\n\n\ndef test_inverse():\n A = eye(4)\n assert A.inv() == eye(4)\n assert A.inv(method=\"LU\") == eye(4)\n assert A.inv(method=\"ADJ\") == eye(4)\n A = Matrix([[2, 3, 5],\n [3, 6, 2],\n [8, 3, 6]])\n Ainv = A.inv()\n assert A*Ainv == eye(3)\n assert A.inv(method=\"LU\") == Ainv\n assert A.inv(method=\"ADJ\") == Ainv\n\n # test that immutability is not a problem\n cls = ImmutableMatrix\n m = cls([[48, 49, 31],\n [ 9, 71, 94],\n [59, 28, 65]])\n assert all(type(m.inv(s)) is cls for s in 'GE ADJ LU'.split())\n cls = ImmutableSparseMatrix\n m = cls([[48, 49, 31],\n [ 9, 71, 94],\n [59, 28, 65]])\n assert all(type(m.inv(s)) is cls for s in 'CH LDL'.split())\n\n\ndef test_matrix_inverse_mod():\n A = Matrix(2, 1, [1, 0])\n raises(NonSquareMatrixError, lambda: A.inv_mod(2))\n A = 
Matrix(2, 2, [1, 0, 0, 0])\n raises(ValueError, lambda: A.inv_mod(2))\n A = Matrix(2, 2, [1, 2, 3, 4])\n Ai = Matrix(2, 2, [1, 1, 0, 1])\n assert A.inv_mod(3) == Ai\n A = Matrix(2, 2, [1, 0, 0, 1])\n assert A.inv_mod(2) == A\n A = Matrix(3, 3, [1, 2, 3, 4, 5, 6, 7, 8, 9])\n raises(ValueError, lambda: A.inv_mod(5))\n A = Matrix(3, 3, [5, 1, 3, 2, 6, 0, 2, 1, 1])\n Ai = Matrix(3, 3, [6, 8, 0, 1, 5, 6, 5, 6, 4])\n assert A.inv_mod(9) == Ai\n A = Matrix(3, 3, [1, 6, -3, 4, 1, -5, 3, -5, 5])\n Ai = Matrix(3, 3, [4, 3, 3, 1, 2, 5, 1, 5, 1])\n assert A.inv_mod(6) == Ai\n A = Matrix(3, 3, [1, 6, 1, 4, 1, 5, 3, 2, 5])\n Ai = Matrix(3, 3, [6, 0, 3, 6, 6, 4, 1, 6, 1])\n assert A.inv_mod(7) == Ai\n\n\ndef test_util():\n R = Rational\n\n v1 = Matrix(1, 3, [1, 2, 3])\n v2 = Matrix(1, 3, [3, 4, 5])\n assert v1.norm() == sqrt(14)\n assert v1.project(v2) == Matrix(1, 3, [R(39)/25, R(52)/25, R(13)/5])\n assert Matrix.zeros(1, 2) == Matrix(1, 2, [0, 0])\n assert ones(1, 2) == Matrix(1, 2, [1, 1])\n assert v1.copy() == v1\n # cofactor\n assert eye(3) == eye(3).cofactor_matrix()\n test = Matrix([[1, 3, 2], [2, 6, 3], [2, 3, 6]])\n assert test.cofactor_matrix() == \\\n Matrix([[27, -6, -6], [-12, 2, 3], [-3, 1, 0]])\n test = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n assert test.cofactor_matrix() == \\\n Matrix([[-3, 6, -3], [6, -12, 6], [-3, 6, -3]])\n\n\ndef test_jacobian_hessian():\n L = Matrix(1, 2, [x**2*y, 2*y**2 + x*y])\n syms = [x, y]\n assert L.jacobian(syms) == Matrix([[2*x*y, x**2], [y, 4*y + x]])\n\n L = Matrix(1, 2, [x, x**2*y**3])\n assert L.jacobian(syms) == Matrix([[1, 0], [2*x*y**3, x**2*3*y**2]])\n\n f = x**2*y\n syms = [x, y]\n assert hessian(f, syms) == Matrix([[2*y, 2*x], [2*x, 0]])\n\n f = x**2*y**3\n assert hessian(f, syms) == \\\n Matrix([[2*y**3, 6*x*y**2], [6*x*y**2, 6*x**2*y]])\n\n f = z + x*y**2\n g = x**2 + 2*y**3\n ans = Matrix([[0, 2*y],\n [2*y, 2*x]])\n assert ans == hessian(f, Matrix([x, y]))\n assert ans == hessian(f, Matrix([x, y]).T)\n assert 
hessian(f, (y, x), [g]) == Matrix([\n [ 0, 6*y**2, 2*x],\n [6*y**2, 2*x, 2*y],\n [ 2*x, 2*y, 0]])\n\n\ndef test_QR():\n A = Matrix([[1, 2], [2, 3]])\n Q, S = A.QRdecomposition()\n R = Rational\n assert Q == Matrix([\n [ 5**R(-1, 2), (R(2)/5)*(R(1)/5)**R(-1, 2)],\n [2*5**R(-1, 2), (-R(1)/5)*(R(1)/5)**R(-1, 2)]])\n assert S == Matrix([[5**R(1, 2), 8*5**R(-1, 2)], [0, (R(1)/5)**R(1, 2)]])\n assert Q*S == A\n assert Q.T * Q == eye(2)\n\n A = Matrix([[1, 1, 1], [1, 1, 3], [2, 3, 4]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n\ndef test_QR_non_square():\n # Narrow (cols < rows) matrices\n A = Matrix([[9, 0, 26], [12, 0, -7], [0, 4, 4], [0, -3, -3]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[1, -1, 4], [1, 4, -2], [1, 4, 2], [1, -1, 0]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix(2, 1, [1, 2])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n # Wide (cols > rows) matrices\n A = Matrix([[1, 2, 3], [4, 5, 6]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[1, 2, 3, 4], [1, 4, 9, 16], [1, 8, 27, 64]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix(1, 2, [1, 2])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\ndef test_QR_trivial():\n # Rank deficient matrices\n A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[1, 1, 1], [2, 2, 2], [3, 3, 3], [4, 4, 4]]).T\n Q, R = 
A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n # Zero rank matrices\n A = Matrix([[0, 0, 0]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[0, 0, 0]]).T\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[0, 0, 0], [0, 0, 0]])\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[0, 0, 0], [0, 0, 0]]).T\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n # Rank deficient matrices with zero norm from beginning columns\n A = Matrix([[0, 0, 0], [1, 2, 3]]).T\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[0, 0, 0, 0], [1, 2, 3, 4], [0, 0, 0, 0]]).T\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[0, 0, 0, 0], [1, 2, 3, 4], [0, 0, 0, 0], [2, 4, 6, 8]]).T\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n A = Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0], [1, 2, 3]]).T\n Q, R = A.QRdecomposition()\n assert Q.T * Q == eye(Q.cols)\n assert R.is_upper\n assert A == Q*R\n\n\ndef test_nullspace():\n # first test reduced row-ech form\n R = Rational\n\n M = Matrix([[5, 7, 2, 1],\n [1, 6, 2, -1]])\n out, tmp = M.rref()\n assert out == Matrix([[1, 0, -R(2)/23, R(13)/23],\n [0, 1, R(8)/23, R(-6)/23]])\n\n M = Matrix([[-5, -1, 4, -3, -1],\n [ 1, -1, -1, 1, 0],\n [-1, 0, 0, 0, 0],\n [ 4, 1, -4, 3, 1],\n [-2, 0, 2, -2, -1]])\n assert M*M.nullspace()[0] == Matrix(5, 1, [0]*5)\n\n M = Matrix([[ 1, 3, 0, 2, 6, 3, 1],\n [-2, -6, 0, -2, -8, 3, 1],\n [ 3, 9, 0, 0, 6, 6, 2],\n [-1, -3, 0, 1, 0, 9, 3]])\n out, tmp = M.rref()\n assert out == Matrix([[1, 3, 0, 0, 2, 0, 0],\n [0, 0, 0, 1, 2, 0, 0],\n [0, 0, 0, 0, 0, 1, 
R(1)/3],\n [0, 0, 0, 0, 0, 0, 0]])\n\n # now check the vectors\n basis = M.nullspace()\n assert basis[0] == Matrix([-3, 1, 0, 0, 0, 0, 0])\n assert basis[1] == Matrix([0, 0, 1, 0, 0, 0, 0])\n assert basis[2] == Matrix([-2, 0, 0, -2, 1, 0, 0])\n assert basis[3] == Matrix([0, 0, 0, 0, 0, R(-1)/3, 1])\n\n # issue 4797; just see that we can do it when rows > cols\n M = Matrix([[1, 2], [2, 4], [3, 6]])\n assert M.nullspace()\n\n\ndef test_columnspace():\n M = Matrix([[ 1, 2, 0, 2, 5],\n [-2, -5, 1, -1, -8],\n [ 0, -3, 3, 4, 1],\n [ 3, 6, 0, -7, 2]])\n\n # now check the vectors\n basis = M.columnspace()\n assert basis[0] == Matrix([1, -2, 0, 3])\n assert basis[1] == Matrix([2, -5, -3, 6])\n assert basis[2] == Matrix([2, -1, 4, -7])\n\n #check by columnspace definition\n a, b, c, d, e = symbols('a b c d e')\n X = Matrix([a, b, c, d, e])\n for i in range(len(basis)):\n eq=M*X-basis[i]\n assert len(solve(eq, X)) != 0\n\n #check if rank-nullity theorem holds\n assert M.rank() == len(basis)\n assert len(M.nullspace()) + len(M.columnspace()) == M.cols\n\n\ndef test_wronskian():\n assert wronskian([cos(x), sin(x)], x) == cos(x)**2 + sin(x)**2\n assert wronskian([exp(x), exp(2*x)], x) == exp(3*x)\n assert wronskian([exp(x), x], x) == exp(x) - x*exp(x)\n assert wronskian([1, x, x**2], x) == 2\n w1 = -6*exp(x)*sin(x)*x + 6*cos(x)*exp(x)*x**2 - 6*exp(x)*cos(x)*x - \\\n exp(x)*cos(x)*x**3 + exp(x)*sin(x)*x**3\n assert wronskian([exp(x), cos(x), x**3], x).expand() == w1\n assert wronskian([exp(x), cos(x), x**3], x, method='berkowitz').expand() \\\n == w1\n w2 = -x**3*cos(x)**2 - x**3*sin(x)**2 - 6*x*cos(x)**2 - 6*x*sin(x)**2\n assert wronskian([sin(x), cos(x), x**3], x).expand() == w2\n assert wronskian([sin(x), cos(x), x**3], x, method='berkowitz').expand() \\\n == w2\n assert wronskian([], x) == 1\n\n\ndef test_eigen():\n R = Rational\n\n assert eye(3).charpoly(x) == Poly((x - 1)**3, x)\n assert eye(3).charpoly(y) == Poly((y - 1)**3, y)\n\n M = Matrix([[1, 0, 0],\n [0, 1, 0],\n [0, 
0, 1]])\n\n assert M.eigenvals(multiple=False) == {S.One: 3}\n assert M.eigenvals(multiple=True) == [1, 1, 1]\n\n assert M.eigenvects() == (\n [(1, 3, [Matrix([1, 0, 0]),\n Matrix([0, 1, 0]),\n Matrix([0, 0, 1])])])\n\n assert M.left_eigenvects() == (\n [(1, 3, [Matrix([[1, 0, 0]]),\n Matrix([[0, 1, 0]]),\n Matrix([[0, 0, 1]])])])\n\n M = Matrix([[0, 1, 1],\n [1, 0, 0],\n [1, 1, 1]])\n\n assert M.eigenvals() == {2*S.One: 1, -S.One: 1, S.Zero: 1}\n\n assert M.eigenvects() == (\n [\n (-1, 1, [Matrix([-1, 1, 0])]),\n ( 0, 1, [Matrix([0, -1, 1])]),\n ( 2, 1, [Matrix([R(2, 3), R(1, 3), 1])])\n ])\n\n assert M.left_eigenvects() == (\n [\n (-1, 1, [Matrix([[-2, 1, 1]])]),\n (0, 1, [Matrix([[-1, -1, 1]])]),\n (2, 1, [Matrix([[1, 1, 1]])])\n ])\n\n a = Symbol('a')\n M = Matrix([[a, 0],\n [0, 1]])\n\n assert M.eigenvals() == {a: 1, S.One: 1}\n\n M = Matrix([[1, -1],\n [1, 3]])\n assert M.eigenvects() == ([(2, 2, [Matrix(2, 1, [-1, 1])])])\n assert M.left_eigenvects() == ([(2, 2, [Matrix([[1, 1]])])])\n\n M = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n a = R(15, 2)\n b = 3*33**R(1, 2)\n c = R(13, 2)\n d = (R(33, 8) + 3*b/8)\n e = (R(33, 8) - 3*b/8)\n\n def NS(e, n):\n return str(N(e, n))\n r = [\n (a - b/2, 1, [Matrix([(12 + 24/(c - b/2))/((c - b/2)*e) + 3/(c - b/2),\n (6 + 12/(c - b/2))/e, 1])]),\n ( 0, 1, [Matrix([1, -2, 1])]),\n (a + b/2, 1, [Matrix([(12 + 24/(c + b/2))/((c + b/2)*d) + 3/(c + b/2),\n (6 + 12/(c + b/2))/d, 1])]),\n ]\n r1 = [(NS(r[i][0], 2), NS(r[i][1], 2),\n [NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]\n r = M.eigenvects()\n r2 = [(NS(r[i][0], 2), NS(r[i][1], 2),\n [NS(j, 2) for j in r[i][2][0]]) for i in range(len(r))]\n assert sorted(r1) == sorted(r2)\n\n eps = Symbol('eps', real=True)\n\n M = Matrix([[abs(eps), I*eps ],\n [-I*eps, abs(eps) ]])\n\n assert M.eigenvects() == (\n [\n ( 0, 1, [Matrix([[-I*eps/abs(eps)], [1]])]),\n ( 2*abs(eps), 1, [ Matrix([[I*eps/abs(eps)], [1]]) ] ),\n ])\n\n assert M.left_eigenvects() == (\n [\n (0, 1, 
[Matrix([[I*eps/Abs(eps), 1]])]),\n (2*Abs(eps), 1, [Matrix([[-I*eps/Abs(eps), 1]])])\n ])\n\n M = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])\n M._eigenvects = M.eigenvects(simplify=False)\n assert max(i.q for i in M._eigenvects[0][2][0]) > 1\n M._eigenvects = M.eigenvects(simplify=True)\n assert max(i.q for i in M._eigenvects[0][2][0]) == 1\n M = Matrix([[S(1)/4, 1], [1, 1]])\n assert M.eigenvects(simplify=True) == [\n (S(5)/8 + sqrt(73)/8, 1, [Matrix([[-S(3)/8 + sqrt(73)/8], [1]])]),\n (-sqrt(73)/8 + S(5)/8, 1, [Matrix([[-sqrt(73)/8 - S(3)/8], [1]])])]\n assert M.eigenvects(simplify=False) ==[(S(5)/8 + sqrt(73)/8, 1, [Matrix([\n [-1/(-sqrt(73)/8 - S(3)/8)],\n [ 1]])]), (-sqrt(73)/8 + S(5)/8, 1, [Matrix([\n [-1/(-S(3)/8 + sqrt(73)/8)],\n [ 1]])])]\n\n m = Matrix([[1, .6, .6], [.6, .9, .9], [.9, .6, .6]])\n evals = {-sqrt(385)/20 + S(5)/4: 1, sqrt(385)/20 + S(5)/4: 1, S.Zero: 1}\n assert m.eigenvals() == evals\n nevals = list(sorted(m.eigenvals(rational=False).keys()))\n sevals = list(sorted(evals.keys()))\n assert all(abs(nevals[i] - sevals[i]) < 1e-9 for i in range(len(nevals)))\n\n # issue 10719\n assert Matrix([]).eigenvals() == {}\n assert Matrix([]).eigenvects() == []\n\n # issue 15119\n raises(NonSquareMatrixError, lambda : Matrix([[1, 2], [0, 4], [0, 0]]).eigenvals())\n raises(NonSquareMatrixError, lambda : Matrix([[1, 0], [3, 4], [5, 6]]).eigenvals())\n raises(NonSquareMatrixError, lambda : Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals())\n raises(NonSquareMatrixError, lambda : Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals())\n raises(NonSquareMatrixError, lambda : Matrix([[1, 2, 3], [0, 5, 6]]).eigenvals(error_when_incomplete = False))\n raises(NonSquareMatrixError, lambda : Matrix([[1, 0, 0], [4, 5, 0]]).eigenvals(error_when_incomplete = False))\n\n # issue 15125\n from sympy.core.function import count_ops\n q = Symbol(\"q\", positive = True)\n m = Matrix([[-2, exp(-q), 1], [exp(q), -2, 1], [1, 1, -2]])\n assert count_ops(m.eigenvals(simplify=False)) > 
count_ops(m.eigenvals(simplify=True))\n assert count_ops(m.eigenvals(simplify=lambda x: x)) > count_ops(m.eigenvals(simplify=True))\n\n assert isinstance(m.eigenvals(simplify=True, multiple=False), dict)\n assert isinstance(m.eigenvals(simplify=True, multiple=True), list)\n assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=False), dict)\n assert isinstance(m.eigenvals(simplify=lambda x: x, multiple=True), list)\n\ndef test_subs():\n assert Matrix([[1, x], [x, 4]]).subs(x, 5) == Matrix([[1, 5], [5, 4]])\n assert Matrix([[x, 2], [x + y, 4]]).subs([[x, -1], [y, -2]]) == \\\n Matrix([[-1, 2], [-3, 4]])\n assert Matrix([[x, 2], [x + y, 4]]).subs([(x, -1), (y, -2)]) == \\\n Matrix([[-1, 2], [-3, 4]])\n assert Matrix([[x, 2], [x + y, 4]]).subs({x: -1, y: -2}) == \\\n Matrix([[-1, 2], [-3, 4]])\n assert Matrix([x*y]).subs({x: y - 1, y: x - 1}, simultaneous=True) == \\\n Matrix([(x - 1)*(y - 1)])\n\n for cls in classes:\n assert Matrix([[2, 0], [0, 2]]) == cls.eye(2).subs(1, 2)\n\ndef test_xreplace():\n assert Matrix([[1, x], [x, 4]]).xreplace({x: 5}) == \\\n Matrix([[1, 5], [5, 4]])\n assert Matrix([[x, 2], [x + y, 4]]).xreplace({x: -1, y: -2}) == \\\n Matrix([[-1, 2], [-3, 4]])\n for cls in classes:\n assert Matrix([[2, 0], [0, 2]]) == cls.eye(2).xreplace({1: 2})\n\ndef test_simplify():\n n = Symbol('n')\n f = Function('f')\n\n M = Matrix([[ 1/x + 1/y, (x + x*y) / x ],\n [ (f(x) + y*f(x))/f(x), 2 * (1/n - cos(n * pi)/n) / pi ]])\n M.simplify()\n assert M == Matrix([[ (x + y)/(x * y), 1 + y ],\n [ 1 + y, 2*((1 - 1*cos(pi*n))/(pi*n)) ]])\n eq = (1 + x)**2\n M = Matrix([[eq]])\n M.simplify()\n assert M == Matrix([[eq]])\n M.simplify(ratio=oo) == M\n assert M == Matrix([[eq.simplify(ratio=oo)]])\n\n\ndef test_transpose():\n M = Matrix([[1, 2, 3, 4, 5, 6, 7, 8, 9, 0],\n [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]])\n assert M.T == Matrix( [ [1, 1],\n [2, 2],\n [3, 3],\n [4, 4],\n [5, 5],\n [6, 6],\n [7, 7],\n [8, 8],\n [9, 9],\n [0, 0] ])\n assert M.T.T == M\n assert M.T == 
M.transpose()\n\n\ndef test_conjugate():\n M = Matrix([[0, I, 5],\n [1, 2, 0]])\n\n assert M.T == Matrix([[0, 1],\n [I, 2],\n [5, 0]])\n\n assert M.C == Matrix([[0, -I, 5],\n [1, 2, 0]])\n assert M.C == M.conjugate()\n\n assert M.H == M.T.C\n assert M.H == Matrix([[ 0, 1],\n [-I, 2],\n [ 5, 0]])\n\n\ndef test_conj_dirac():\n raises(AttributeError, lambda: eye(3).D)\n\n M = Matrix([[1, I, I, I],\n [0, 1, I, I],\n [0, 0, 1, I],\n [0, 0, 0, 1]])\n\n assert M.D == Matrix([[ 1, 0, 0, 0],\n [-I, 1, 0, 0],\n [-I, -I, -1, 0],\n [-I, -I, I, -1]])\n\n\ndef test_trace():\n M = Matrix([[1, 0, 0],\n [0, 5, 0],\n [0, 0, 8]])\n assert M.trace() == 14\n\n\ndef test_shape():\n M = Matrix([[x, 0, 0],\n [0, y, 0]])\n assert M.shape == (2, 3)\n\n\ndef test_col_row_op():\n M = Matrix([[x, 0, 0],\n [0, y, 0]])\n M.row_op(1, lambda r, j: r + j + 1)\n assert M == Matrix([[x, 0, 0],\n [1, y + 2, 3]])\n\n M.col_op(0, lambda c, j: c + y**j)\n assert M == Matrix([[x + 1, 0, 0],\n [1 + y, y + 2, 3]])\n\n # neither row nor slice give copies that allow the original matrix to\n # be changed\n assert M.row(0) == Matrix([[x + 1, 0, 0]])\n r1 = M.row(0)\n r1[0] = 42\n assert M[0, 0] == x + 1\n r1 = M[0, :-1] # also testing negative slice\n r1[0] = 42\n assert M[0, 0] == x + 1\n c1 = M.col(0)\n assert c1 == Matrix([x + 1, 1 + y])\n c1[0] = 0\n assert M[0, 0] == x + 1\n c1 = M[:, 0]\n c1[0] = 42\n assert M[0, 0] == x + 1\n\n\ndef test_zip_row_op():\n for cls in classes[:2]: # XXX: immutable matrices don't support row ops\n M = cls.eye(3)\n M.zip_row_op(1, 0, lambda v, u: v + 2*u)\n assert M == cls([[1, 0, 0],\n [2, 1, 0],\n [0, 0, 1]])\n\n M = cls.eye(3)*2\n M[0, 1] = -1\n M.zip_row_op(1, 0, lambda v, u: v + 2*u); M\n assert M == cls([[2, -1, 0],\n [4, 0, 0],\n [0, 0, 2]])\n\ndef test_issue_3950():\n m = Matrix([1, 2, 3])\n a = Matrix([1, 2, 3])\n b = Matrix([2, 2, 3])\n assert not (m in [])\n assert not (m in [1])\n assert m != 1\n assert m == a\n assert m != b\n\n\ndef test_issue_3981():\n class 
Index1(object):\n def __index__(self):\n return 1\n\n class Index2(object):\n def __index__(self):\n return 2\n index1 = Index1()\n index2 = Index2()\n\n m = Matrix([1, 2, 3])\n\n assert m[index2] == 3\n\n m[index2] = 5\n assert m[2] == 5\n\n m = Matrix([[1, 2, 3], [4, 5, 6]])\n assert m[index1, index2] == 6\n assert m[1, index2] == 6\n assert m[index1, 2] == 6\n\n m[index1, index2] = 4\n assert m[1, 2] == 4\n m[1, index2] = 6\n assert m[1, 2] == 6\n m[index1, 2] = 8\n assert m[1, 2] == 8\n\n\ndef test_evalf():\n a = Matrix([sqrt(5), 6])\n assert all(a.evalf()[i] == a[i].evalf() for i in range(2))\n assert all(a.evalf(2)[i] == a[i].evalf(2) for i in range(2))\n assert all(a.n(2)[i] == a[i].n(2) for i in range(2))\n\n\ndef test_is_symbolic():\n a = Matrix([[x, x], [x, x]])\n assert a.is_symbolic() is True\n a = Matrix([[1, 2, 3, 4], [5, 6, 7, 8]])\n assert a.is_symbolic() is False\n a = Matrix([[1, 2, 3, 4], [5, 6, x, 8]])\n assert a.is_symbolic() is True\n a = Matrix([[1, x, 3]])\n assert a.is_symbolic() is True\n a = Matrix([[1, 2, 3]])\n assert a.is_symbolic() is False\n a = Matrix([[1], [x], [3]])\n assert a.is_symbolic() is True\n a = Matrix([[1], [2], [3]])\n assert a.is_symbolic() is False\n\n\ndef test_is_upper():\n a = Matrix([[1, 2, 3]])\n assert a.is_upper is True\n a = Matrix([[1], [2], [3]])\n assert a.is_upper is False\n a = zeros(4, 2)\n assert a.is_upper is True\n\n\ndef test_is_lower():\n a = Matrix([[1, 2, 3]])\n assert a.is_lower is False\n a = Matrix([[1], [2], [3]])\n assert a.is_lower is True\n\n\ndef test_is_nilpotent():\n a = Matrix(4, 4, [0, 2, 1, 6, 0, 0, 1, 2, 0, 0, 0, 3, 0, 0, 0, 0])\n assert a.is_nilpotent()\n a = Matrix([[1, 0], [0, 1]])\n assert not a.is_nilpotent()\n a = Matrix([])\n assert a.is_nilpotent()\n\n\ndef test_zeros_ones_fill():\n n, m = 3, 5\n\n a = zeros(n, m)\n a.fill( 5 )\n\n b = 5 * ones(n, m)\n\n assert a == b\n assert a.rows == b.rows == 3\n assert a.cols == b.cols == 5\n assert a.shape == b.shape == (3, 5)\n assert 
zeros(2) == zeros(2, 2)\n assert ones(2) == ones(2, 2)\n assert zeros(2, 3) == Matrix(2, 3, [0]*6)\n assert ones(2, 3) == Matrix(2, 3, [1]*6)\n\n\ndef test_empty_zeros():\n a = zeros(0)\n assert a == Matrix()\n a = zeros(0, 2)\n assert a.rows == 0\n assert a.cols == 2\n a = zeros(2, 0)\n assert a.rows == 2\n assert a.cols == 0\n\n\ndef test_issue_3749():\n a = Matrix([[x**2, x*y], [x*sin(y), x*cos(y)]])\n assert a.diff(x) == Matrix([[2*x, y], [sin(y), cos(y)]])\n assert Matrix([\n [x, -x, x**2],\n [exp(x), 1/x - exp(-x), x + 1/x]]).limit(x, oo) == \\\n Matrix([[oo, -oo, oo], [oo, 0, oo]])\n assert Matrix([\n [(exp(x) - 1)/x, 2*x + y*x, x**x ],\n [1/x, abs(x), abs(sin(x + 1))]]).limit(x, 0) == \\\n Matrix([[1, 0, 1], [oo, 0, sin(1)]])\n assert a.integrate(x) == Matrix([\n [Rational(1, 3)*x**3, y*x**2/2],\n [x**2*sin(y)/2, x**2*cos(y)/2]])\n\n\ndef test_inv_iszerofunc():\n A = eye(4)\n A.col_swap(0, 1)\n for method in \"GE\", \"LU\":\n assert A.inv(method=method, iszerofunc=lambda x: x == 0) == \\\n A.inv(method=\"ADJ\")\n\n\ndef test_jacobian_metrics():\n rho, phi = symbols(\"rho,phi\")\n X = Matrix([rho*cos(phi), rho*sin(phi)])\n Y = Matrix([rho, phi])\n J = X.jacobian(Y)\n assert J == X.jacobian(Y.T)\n assert J == (X.T).jacobian(Y)\n assert J == (X.T).jacobian(Y.T)\n g = J.T*eye(J.shape[0])*J\n g = g.applyfunc(trigsimp)\n assert g == Matrix([[1, 0], [0, rho**2]])\n\n\ndef test_jacobian2():\n rho, phi = symbols(\"rho,phi\")\n X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])\n Y = Matrix([rho, phi])\n J = Matrix([\n [cos(phi), -rho*sin(phi)],\n [sin(phi), rho*cos(phi)],\n [ 2*rho, 0],\n ])\n assert X.jacobian(Y) == J\n\n\ndef test_issue_4564():\n X = Matrix([exp(x + y + z), exp(x + y + z), exp(x + y + z)])\n Y = Matrix([x, y, z])\n for i in range(1, 3):\n for j in range(1, 3):\n X_slice = X[:i, :]\n Y_slice = Y[:j, :]\n J = X_slice.jacobian(Y_slice)\n assert J.rows == i\n assert J.cols == j\n for k in range(j):\n assert J[:, k] == X_slice\n\n\ndef 
test_nonvectorJacobian():\n X = Matrix([[exp(x + y + z), exp(x + y + z)],\n [exp(x + y + z), exp(x + y + z)]])\n raises(TypeError, lambda: X.jacobian(Matrix([x, y, z])))\n X = X[0, :]\n Y = Matrix([[x, y], [x, z]])\n raises(TypeError, lambda: X.jacobian(Y))\n raises(TypeError, lambda: X.jacobian(Matrix([ [x, y], [x, z] ])))\n\n\ndef test_vec():\n m = Matrix([[1, 3], [2, 4]])\n m_vec = m.vec()\n assert m_vec.cols == 1\n for i in range(4):\n assert m_vec[i] == i + 1\n\n\ndef test_vech():\n m = Matrix([[1, 2], [2, 3]])\n m_vech = m.vech()\n assert m_vech.cols == 1\n for i in range(3):\n assert m_vech[i] == i + 1\n m_vech = m.vech(diagonal=False)\n assert m_vech[0] == 2\n\n m = Matrix([[1, x*(x + y)], [y*x + x**2, 1]])\n m_vech = m.vech(diagonal=False)\n assert m_vech[0] == x*(x + y)\n\n m = Matrix([[1, x*(x + y)], [y*x, 1]])\n m_vech = m.vech(diagonal=False, check_symmetry=False)\n assert m_vech[0] == y*x\n\n\ndef test_vech_errors():\n m = Matrix([[1, 3]])\n raises(ShapeError, lambda: m.vech())\n m = Matrix([[1, 3], [2, 4]])\n raises(ValueError, lambda: m.vech())\n raises(ShapeError, lambda: Matrix([ [1, 3] ]).vech())\n raises(ValueError, lambda: Matrix([ [1, 3], [2, 4] ]).vech())\n\n\ndef test_diag():\n a = Matrix([[1, 2], [2, 3]])\n b = Matrix([[3, x], [y, 3]])\n c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])\n assert diag(a, b, b) == Matrix([\n [1, 2, 0, 0, 0, 0],\n [2, 3, 0, 0, 0, 0],\n [0, 0, 3, x, 0, 0],\n [0, 0, y, 3, 0, 0],\n [0, 0, 0, 0, 3, x],\n [0, 0, 0, 0, y, 3],\n ])\n assert diag(a, b, c) == Matrix([\n [1, 2, 0, 0, 0, 0, 0],\n [2, 3, 0, 0, 0, 0, 0],\n [0, 0, 3, x, 0, 0, 0],\n [0, 0, y, 3, 0, 0, 0],\n [0, 0, 0, 0, 3, x, 3],\n [0, 0, 0, 0, y, 3, z],\n [0, 0, 0, 0, x, y, z],\n ])\n assert diag(a, c, b) == Matrix([\n [1, 2, 0, 0, 0, 0, 0],\n [2, 3, 0, 0, 0, 0, 0],\n [0, 0, 3, x, 3, 0, 0],\n [0, 0, y, 3, z, 0, 0],\n [0, 0, x, y, z, 0, 0],\n [0, 0, 0, 0, 0, 3, x],\n [0, 0, 0, 0, 0, y, 3],\n ])\n a = Matrix([x, y, z])\n b = Matrix([[1, 2], [3, 4]])\n c = 
Matrix([[5, 6]])\n assert diag(a, 7, b, c) == Matrix([\n [x, 0, 0, 0, 0, 0],\n [y, 0, 0, 0, 0, 0],\n [z, 0, 0, 0, 0, 0],\n [0, 7, 0, 0, 0, 0],\n [0, 0, 1, 2, 0, 0],\n [0, 0, 3, 4, 0, 0],\n [0, 0, 0, 0, 5, 6],\n ])\n assert diag(1, [2, 3], [[4, 5]]) == Matrix([\n [1, 0, 0, 0],\n [0, 2, 0, 0],\n [0, 3, 0, 0],\n [0, 0, 4, 5]])\n\n\ndef test_get_diag_blocks1():\n a = Matrix([[1, 2], [2, 3]])\n b = Matrix([[3, x], [y, 3]])\n c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])\n assert a.get_diag_blocks() == [a]\n assert b.get_diag_blocks() == [b]\n assert c.get_diag_blocks() == [c]\n\n\ndef test_get_diag_blocks2():\n a = Matrix([[1, 2], [2, 3]])\n b = Matrix([[3, x], [y, 3]])\n c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])\n assert diag(a, b, b).get_diag_blocks() == [a, b, b]\n assert diag(a, b, c).get_diag_blocks() == [a, b, c]\n assert diag(a, c, b).get_diag_blocks() == [a, c, b]\n assert diag(c, c, b).get_diag_blocks() == [c, c, b]\n\n\ndef test_inv_block():\n a = Matrix([[1, 2], [2, 3]])\n b = Matrix([[3, x], [y, 3]])\n c = Matrix([[3, x, 3], [y, 3, z], [x, y, z]])\n A = diag(a, b, b)\n assert A.inv(try_block_diag=True) == diag(a.inv(), b.inv(), b.inv())\n A = diag(a, b, c)\n assert A.inv(try_block_diag=True) == diag(a.inv(), b.inv(), c.inv())\n A = diag(a, c, b)\n assert A.inv(try_block_diag=True) == diag(a.inv(), c.inv(), b.inv())\n A = diag(a, a, b, a, c, a)\n assert A.inv(try_block_diag=True) == diag(\n a.inv(), a.inv(), b.inv(), a.inv(), c.inv(), a.inv())\n assert A.inv(try_block_diag=True, method=\"ADJ\") == diag(\n a.inv(method=\"ADJ\"), a.inv(method=\"ADJ\"), b.inv(method=\"ADJ\"),\n a.inv(method=\"ADJ\"), c.inv(method=\"ADJ\"), a.inv(method=\"ADJ\"))\n\n\ndef test_creation_args():\n \"\"\"\n Check that matrix dimensions can be specified using any reasonable type\n (see issue 4614).\n \"\"\"\n raises(ValueError, lambda: zeros(3, -1))\n raises(TypeError, lambda: zeros(1, 2, 3, 4))\n assert zeros(long(3)) == zeros(3)\n assert zeros(Integer(3)) == zeros(3)\n assert 
zeros(3.) == zeros(3)\n assert eye(long(3)) == eye(3)\n assert eye(Integer(3)) == eye(3)\n assert eye(3.) == eye(3)\n assert ones(long(3), Integer(4)) == ones(3, 4)\n raises(TypeError, lambda: Matrix(5))\n raises(TypeError, lambda: Matrix(1, 2))\n\n\ndef test_diagonal_symmetrical():\n m = Matrix(2, 2, [0, 1, 1, 0])\n assert not m.is_diagonal()\n assert m.is_symmetric()\n assert m.is_symmetric(simplify=False)\n\n m = Matrix(2, 2, [1, 0, 0, 1])\n assert m.is_diagonal()\n\n m = diag(1, 2, 3)\n assert m.is_diagonal()\n assert m.is_symmetric()\n\n m = Matrix(3, 3, [1, 0, 0, 0, 2, 0, 0, 0, 3])\n assert m == diag(1, 2, 3)\n\n m = Matrix(2, 3, zeros(2, 3))\n assert not m.is_symmetric()\n assert m.is_diagonal()\n\n m = Matrix(((5, 0), (0, 6), (0, 0)))\n assert m.is_diagonal()\n\n m = Matrix(((5, 0, 0), (0, 6, 0)))\n assert m.is_diagonal()\n\n m = Matrix(3, 3, [1, x**2 + 2*x + 1, y, (x + 1)**2, 2, 0, y, 0, 3])\n assert m.is_symmetric()\n assert not m.is_symmetric(simplify=False)\n assert m.expand().is_symmetric(simplify=False)\n\n\ndef test_diagonalization():\n m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])\n assert not m.is_diagonalizable()\n assert not m.is_symmetric()\n raises(NonSquareMatrixError, lambda: m.diagonalize())\n\n # diagonalizable\n m = diag(1, 2, 3)\n (P, D) = m.diagonalize()\n assert P == eye(3)\n assert D == m\n\n m = Matrix(2, 2, [0, 1, 1, 0])\n assert m.is_symmetric()\n assert m.is_diagonalizable()\n (P, D) = m.diagonalize()\n assert P.inv() * m * P == D\n\n m = Matrix(2, 2, [1, 0, 0, 3])\n assert m.is_symmetric()\n assert m.is_diagonalizable()\n (P, D) = m.diagonalize()\n assert P.inv() * m * P == D\n assert P == eye(2)\n assert D == m\n\n m = Matrix(2, 2, [1, 1, 0, 0])\n assert m.is_diagonalizable()\n (P, D) = m.diagonalize()\n assert P.inv() * m * P == D\n\n m = Matrix(3, 3, [1, 2, 0, 0, 3, 0, 2, -4, 2])\n assert m.is_diagonalizable()\n (P, D) = m.diagonalize()\n assert P.inv() * m * P == D\n for i in P:\n assert i.as_numer_denom()[1] == 1\n\n m = Matrix(2, 
2, [1, 0, 0, 0])\n assert m.is_diagonal()\n assert m.is_diagonalizable()\n (P, D) = m.diagonalize()\n assert P.inv() * m * P == D\n assert P == Matrix([[0, 1], [1, 0]])\n\n # diagonalizable, complex only\n m = Matrix(2, 2, [0, 1, -1, 0])\n assert not m.is_diagonalizable(True)\n raises(MatrixError, lambda: m.diagonalize(True))\n assert m.is_diagonalizable()\n (P, D) = m.diagonalize()\n assert P.inv() * m * P == D\n\n # not diagonalizable\n m = Matrix(2, 2, [0, 1, 0, 0])\n assert not m.is_diagonalizable()\n raises(MatrixError, lambda: m.diagonalize())\n\n m = Matrix(3, 3, [-3, 1, -3, 20, 3, 10, 2, -2, 4])\n assert not m.is_diagonalizable()\n raises(MatrixError, lambda: m.diagonalize())\n\n # symbolic\n a, b, c, d = symbols('a b c d')\n m = Matrix(2, 2, [a, c, c, b])\n assert m.is_symmetric()\n assert m.is_diagonalizable()\n\n\n@XFAIL\ndef test_eigen_vects():\n m = Matrix(2, 2, [1, 0, 0, I])\n raises(NotImplementedError, lambda: m.is_diagonalizable(True))\n # !!! bug because of eigenvects() or roots(x**2 + (-1 - I)*x + I, x)\n # see issue 5292\n assert not m.is_diagonalizable(True)\n raises(MatrixError, lambda: m.diagonalize(True))\n (P, D) = m.diagonalize(True)\n\n\ndef test_jordan_form():\n\n m = Matrix(3, 2, [-3, 1, -3, 20, 3, 10])\n raises(NonSquareMatrixError, lambda: m.jordan_form())\n\n # diagonalizable\n m = Matrix(3, 3, [7, -12, 6, 10, -19, 10, 12, -24, 13])\n Jmust = Matrix(3, 3, [-1, 0, 0, 0, 1, 0, 0, 0, 1])\n P, J = m.jordan_form()\n assert Jmust == J\n assert Jmust == m.diagonalize()[1]\n\n # m = Matrix(3, 3, [0, 6, 3, 1, 3, 1, -2, 2, 1])\n # m.jordan_form() # very long\n # m.jordan_form() #\n\n # diagonalizable, complex only\n\n # Jordan cells\n # complexity: one of eigenvalues is zero\n m = Matrix(3, 3, [0, 1, 0, -4, 4, 0, -2, 1, 2])\n # The blocks are ordered according to the value of their eigenvalues,\n # in order to make the matrix compatible with .diagonalize()\n Jmust = Matrix(3, 3, [2, 1, 0, 0, 2, 0, 0, 0, 2])\n P, J = m.jordan_form()\n assert 
Jmust == J\n\n # complexity: all of eigenvalues are equal\n m = Matrix(3, 3, [2, 6, -15, 1, 1, -5, 1, 2, -6])\n # Jmust = Matrix(3, 3, [-1, 0, 0, 0, -1, 1, 0, 0, -1])\n # same here see 1456ff\n Jmust = Matrix(3, 3, [-1, 1, 0, 0, -1, 0, 0, 0, -1])\n P, J = m.jordan_form()\n assert Jmust == J\n\n # complexity: two of eigenvalues are zero\n m = Matrix(3, 3, [4, -5, 2, 5, -7, 3, 6, -9, 4])\n Jmust = Matrix(3, 3, [0, 1, 0, 0, 0, 0, 0, 0, 1])\n P, J = m.jordan_form()\n assert Jmust == J\n\n m = Matrix(4, 4, [6, 5, -2, -3, -3, -1, 3, 3, 2, 1, -2, -3, -1, 1, 5, 5])\n Jmust = Matrix(4, 4, [2, 1, 0, 0,\n 0, 2, 0, 0,\n 0, 0, 2, 1,\n 0, 0, 0, 2]\n )\n P, J = m.jordan_form()\n assert Jmust == J\n\n m = Matrix(4, 4, [6, 2, -8, -6, -3, 2, 9, 6, 2, -2, -8, -6, -1, 0, 3, 4])\n # Jmust = Matrix(4, 4, [2, 0, 0, 0, 0, 2, 1, 0, 0, 0, 2, 0, 0, 0, 0, -2])\n # same here see 1456ff\n Jmust = Matrix(4, 4, [-2, 0, 0, 0,\n 0, 2, 1, 0,\n 0, 0, 2, 0,\n 0, 0, 0, 2])\n P, J = m.jordan_form()\n assert Jmust == J\n\n m = Matrix(4, 4, [5, 4, 2, 1, 0, 1, -1, -1, -1, -1, 3, 0, 1, 1, -1, 2])\n assert not m.is_diagonalizable()\n Jmust = Matrix(4, 4, [1, 0, 0, 0, 0, 2, 0, 0, 0, 0, 4, 1, 0, 0, 0, 4])\n P, J = m.jordan_form()\n assert Jmust == J\n\n # checking for maximum precision to remain unchanged\n m = Matrix([[Float('1.0', precision=110), Float('2.0', precision=110)],\n [Float('3.14159265358979323846264338327', precision=110), Float('4.0', precision=110)]])\n P, J = m.jordan_form()\n for term in J._mat:\n if isinstance(term, Float):\n assert term._prec == 110\n\n\ndef test_jordan_form_complex_issue_9274():\n A = Matrix([[ 2, 4, 1, 0],\n [-4, 2, 0, 1],\n [ 0, 0, 2, 4],\n [ 0, 0, -4, 2]])\n p = 2 - 4*I;\n q = 2 + 4*I;\n Jmust1 = Matrix([[p, 1, 0, 0],\n [0, p, 0, 0],\n [0, 0, q, 1],\n [0, 0, 0, q]])\n Jmust2 = Matrix([[q, 1, 0, 0],\n [0, q, 0, 0],\n [0, 0, p, 1],\n [0, 0, 0, p]])\n P, J = A.jordan_form()\n assert J == Jmust1 or J == Jmust2\n assert simplify(P*J*P.inv()) == A\n\ndef test_issue_10220():\n 
# two non-orthogonal Jordan blocks with eigenvalue 1\n M = Matrix([[1, 0, 0, 1],\n [0, 1, 1, 0],\n [0, 0, 1, 1],\n [0, 0, 0, 1]])\n P, J = M.jordan_form()\n assert P == Matrix([[0, 1, 0, 1],\n [1, 0, 0, 0],\n [0, 1, 0, 0],\n [0, 0, 1, 0]])\n assert J == Matrix([\n [1, 1, 0, 0],\n [0, 1, 1, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n\ndef test_Matrix_berkowitz_charpoly():\n UA, K_i, K_w = symbols('UA K_i K_w')\n\n A = Matrix([[-K_i - UA + K_i**2/(K_i + K_w), K_i*K_w/(K_i + K_w)],\n [ K_i*K_w/(K_i + K_w), -K_w + K_w**2/(K_i + K_w)]])\n\n charpoly = A.charpoly(x)\n\n assert charpoly == \\\n Poly(x**2 + (K_i*UA + K_w*UA + 2*K_i*K_w)/(K_i + K_w)*x +\n K_i*K_w*UA/(K_i + K_w), x, domain='ZZ(K_i,K_w,UA)')\n\n assert type(charpoly) is PurePoly\n\n A = Matrix([[1, 3], [2, 0]])\n assert A.charpoly() == A.charpoly(x) == PurePoly(x**2 - x - 6)\n\n A = Matrix([[1, 2], [x, 0]])\n p = A.charpoly(x)\n assert p.gen != x\n assert p.as_expr().subs(p.gen, x) == x**2 - 3*x\n\n\ndef test_exp():\n m = Matrix([[3, 4], [0, -2]])\n m_exp = Matrix([[exp(3), -4*exp(-2)/5 + 4*exp(3)/5], [0, exp(-2)]])\n assert m.exp() == m_exp\n assert exp(m) == m_exp\n\n m = Matrix([[1, 0], [0, 1]])\n assert m.exp() == Matrix([[E, 0], [0, E]])\n assert exp(m) == Matrix([[E, 0], [0, E]])\n\n m = Matrix([[1, -1], [1, 1]])\n assert m.exp() == Matrix([[E*cos(1), -E*sin(1)], [E*sin(1), E*cos(1)]])\n\n\ndef test_has():\n A = Matrix(((x, y), (2, 3)))\n assert A.has(x)\n assert not A.has(z)\n assert A.has(Symbol)\n\n A = A.subs(x, 2)\n assert not A.has(x)\n\ndef test_LUdecomposition_Simple_iszerofunc():\n # Test if callable passed to matrices.LUdecomposition_Simple() as iszerofunc keyword argument is used inside\n # matrices.LUdecomposition_Simple()\n magic_string = \"I got passed in!\"\n def goofyiszero(value):\n raise ValueError(magic_string)\n\n try:\n lu, p = Matrix([[1, 0], [0, 1]]).LUdecomposition_Simple(iszerofunc=goofyiszero)\n except ValueError as err:\n assert magic_string == err.args[0]\n return\n\n assert 
False\n\ndef test_LUdecomposition_iszerofunc():\n # Test if callable passed to matrices.LUdecomposition() as iszerofunc keyword argument is used inside\n # matrices.LUdecomposition_Simple()\n magic_string = \"I got passed in!\"\n def goofyiszero(value):\n raise ValueError(magic_string)\n\n try:\n l, u, p = Matrix([[1, 0], [0, 1]]).LUdecomposition(iszerofunc=goofyiszero)\n except ValueError as err:\n assert magic_string == err.args[0]\n return\n\n assert False\n\ndef test_find_reasonable_pivot_naive_finds_guaranteed_nonzero1():\n # Test if matrices._find_reasonable_pivot_naive()\n # finds a guaranteed non-zero pivot when the\n # some of the candidate pivots are symbolic expressions.\n # Keyword argument: simpfunc=None indicates that no simplifications\n # should be performed during the search.\n x = Symbol('x')\n column = Matrix(3, 1, [x, cos(x)**2 + sin(x)**2, Rational(1, 2)])\n pivot_offset, pivot_val, pivot_assumed_nonzero, simplified =\\\n _find_reasonable_pivot_naive(column)\n assert pivot_val == Rational(1, 2)\n\ndef test_find_reasonable_pivot_naive_finds_guaranteed_nonzero2():\n # Test if matrices._find_reasonable_pivot_naive()\n # finds a guaranteed non-zero pivot when the\n # some of the candidate pivots are symbolic expressions.\n # Keyword argument: simpfunc=_simplify indicates that the search\n # should attempt to simplify candidate pivots.\n x = Symbol('x')\n column = Matrix(3, 1,\n [x,\n cos(x)**2+sin(x)**2+x**2,\n cos(x)**2+sin(x)**2])\n pivot_offset, pivot_val, pivot_assumed_nonzero, simplified =\\\n _find_reasonable_pivot_naive(column, simpfunc=_simplify)\n assert pivot_val == 1\n\ndef test_find_reasonable_pivot_naive_simplifies():\n # Test if matrices._find_reasonable_pivot_naive()\n # simplifies candidate pivots, and reports\n # their offsets correctly.\n x = Symbol('x')\n column = Matrix(3, 1,\n [x,\n cos(x)**2+sin(x)**2+x,\n cos(x)**2+sin(x)**2])\n pivot_offset, pivot_val, pivot_assumed_nonzero, simplified =\\\n 
_find_reasonable_pivot_naive(column, simpfunc=_simplify)\n\n assert len(simplified) == 2\n assert simplified[0][0] == 1\n assert simplified[0][1] == 1+x\n assert simplified[1][0] == 2\n assert simplified[1][1] == 1\n\ndef test_errors():\n raises(ValueError, lambda: Matrix([[1, 2], [1]]))\n raises(IndexError, lambda: Matrix([[1, 2]])[1.2, 5])\n raises(IndexError, lambda: Matrix([[1, 2]])[1, 5.2])\n raises(ValueError, lambda: randMatrix(3, c=4, symmetric=True))\n raises(ValueError, lambda: Matrix([1, 2]).reshape(4, 6))\n raises(ShapeError,\n lambda: Matrix([[1, 2], [3, 4]]).copyin_matrix([1, 0], Matrix([1, 2])))\n raises(TypeError, lambda: Matrix([[1, 2], [3, 4]]).copyin_list([0,\n 1], set([])))\n raises(NonSquareMatrixError, lambda: Matrix([[1, 2, 3], [2, 3, 0]]).inv())\n raises(ShapeError,\n lambda: Matrix(1, 2, [1, 2]).row_join(Matrix([[1, 2], [3, 4]])))\n raises(\n ShapeError, lambda: Matrix([1, 2]).col_join(Matrix([[1, 2], [3, 4]])))\n raises(ShapeError, lambda: Matrix([1]).row_insert(1, Matrix([[1,\n 2], [3, 4]])))\n raises(ShapeError, lambda: Matrix([1]).col_insert(1, Matrix([[1,\n 2], [3, 4]])))\n raises(NonSquareMatrixError, lambda: Matrix([1, 2]).trace())\n raises(TypeError, lambda: Matrix([1]).applyfunc(1))\n raises(ShapeError, lambda: Matrix([1]).LUsolve(Matrix([[1, 2], [3, 4]])))\n raises(ValueError, lambda: Matrix([[1, 2], [3, 4]]).minor(4, 5))\n raises(ValueError, lambda: Matrix([[1, 2], [3, 4]]).minor_submatrix(4, 5))\n raises(TypeError, lambda: Matrix([1, 2, 3]).cross(1))\n raises(TypeError, lambda: Matrix([1, 2, 3]).dot(1))\n raises(ShapeError, lambda: Matrix([1, 2, 3]).dot(Matrix([1, 2])))\n raises(ShapeError, lambda: Matrix([1, 2]).dot([]))\n raises(TypeError, lambda: Matrix([1, 2]).dot('a'))\n with warns_deprecated_sympy():\n Matrix([[1, 2], [3, 4]]).dot(Matrix([[4, 3], [1, 2]]))\n raises(ShapeError, lambda: Matrix([1, 2]).dot([1, 2, 3]))\n raises(NonSquareMatrixError, lambda: Matrix([1, 2, 3]).exp())\n raises(ShapeError, lambda: Matrix([[1, 2], 
[3, 4]]).normalized())\n raises(ValueError, lambda: Matrix([1, 2]).inv(method='not a method'))\n raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_GE())\n raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inverse_GE())\n raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_ADJ())\n raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inverse_ADJ())\n raises(NonSquareMatrixError, lambda: Matrix([1, 2]).inverse_LU())\n raises(NonSquareMatrixError, lambda: Matrix([1, 2]).is_nilpotent())\n raises(NonSquareMatrixError, lambda: Matrix([1, 2]).det())\n raises(ValueError,\n lambda: Matrix([[1, 2], [3, 4]]).det(method='Not a real method'))\n raises(ValueError,\n lambda: Matrix([[1, 2, 3, 4], [5, 6, 7, 8],\n [9, 10, 11, 12], [13, 14, 15, 16]]).det(iszerofunc=\"Not function\"))\n raises(ValueError,\n lambda: Matrix([[1, 2, 3, 4], [5, 6, 7, 8],\n [9, 10, 11, 12], [13, 14, 15, 16]]).det(iszerofunc=False))\n raises(ValueError,\n lambda: hessian(Matrix([[1, 2], [3, 4]]), Matrix([[1, 2], [2, 1]])))\n raises(ValueError, lambda: hessian(Matrix([[1, 2], [3, 4]]), []))\n raises(ValueError, lambda: hessian(Symbol('x')**2, 'a'))\n raises(IndexError, lambda: eye(3)[5, 2])\n raises(IndexError, lambda: eye(3)[2, 5])\n M = Matrix(((1, 2, 3, 4), (5, 6, 7, 8), (9, 10, 11, 12), (13, 14, 15, 16)))\n raises(ValueError, lambda: M.det('method=LU_decomposition()'))\n V = Matrix([[10, 10, 10]])\n M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n raises(ValueError, lambda: M.row_insert(4.7, V))\n M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n raises(ValueError, lambda: M.col_insert(-4.2, V))\n\ndef test_len():\n assert len(Matrix()) == 0\n assert len(Matrix([[1, 2]])) == len(Matrix([[1], [2]])) == 2\n assert len(Matrix(0, 2, lambda i, j: 0)) == \\\n len(Matrix(2, 0, lambda i, j: 0)) == 0\n assert len(Matrix([[0, 1, 2], [3, 4, 5]])) == 6\n assert Matrix([1]) == Matrix([[1]])\n assert not Matrix()\n assert Matrix() == Matrix([])\n\n\ndef test_integrate():\n A = Matrix(((1, 4, x), (y, 
2, 4), (10, 5, x**2)))\n assert A.integrate(x) == \\\n Matrix(((x, 4*x, x**2/2), (x*y, 2*x, 4*x), (10*x, 5*x, x**3/3)))\n assert A.integrate(y) == \\\n Matrix(((y, 4*y, x*y), (y**2/2, 2*y, 4*y), (10*y, 5*y, y*x**2)))\n\n\ndef test_limit():\n A = Matrix(((1, 4, sin(x)/x), (y, 2, 4), (10, 5, x**2 + 1)))\n assert A.limit(x, 0) == Matrix(((1, 4, 1), (y, 2, 4), (10, 5, 1)))\n\n\ndef test_diff():\n A = MutableDenseMatrix(((1, 4, x), (y, 2, 4), (10, 5, x**2 + 1)))\n assert isinstance(A.diff(x), type(A))\n assert A.diff(x) == MutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))\n assert A.diff(y) == MutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))\n\n assert diff(A, x) == MutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))\n assert diff(A, y) == MutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))\n\n A_imm = A.as_immutable()\n assert isinstance(A_imm.diff(x), type(A_imm))\n assert A_imm.diff(x) == ImmutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))\n assert A_imm.diff(y) == ImmutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))\n\n assert diff(A_imm, x) == ImmutableDenseMatrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))\n assert diff(A_imm, y) == ImmutableDenseMatrix(((0, 0, 0), (1, 0, 0), (0, 0, 0)))\n\n\ndef test_diff_by_matrix():\n\n # Derive matrix by matrix:\n\n A = MutableDenseMatrix([[x, y], [z, t]])\n assert A.diff(A) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])\n assert diff(A, A) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])\n\n A_imm = A.as_immutable()\n assert A_imm.diff(A_imm) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])\n assert diff(A_imm, A_imm) == Array([[[[1, 0], [0, 0]], [[0, 1], [0, 0]]], [[[0, 0], [1, 0]], [[0, 0], [0, 1]]]])\n\n # Derive a constant matrix:\n assert A.diff(a) == MutableDenseMatrix([[0, 0], [0, 0]])\n\n B = ImmutableDenseMatrix([a, b])\n assert A.diff(B) == A.zeros(2)\n\n # Test diff with tuples:\n\n 
dB = B.diff([[a, b]])\n assert dB.shape == (2, 2, 1)\n assert dB == Array([[[1], [0]], [[0], [1]]])\n\n f = Function(\"f\")\n fxyz = f(x, y, z)\n assert fxyz.diff([[x, y, z]]) == Array([fxyz.diff(x), fxyz.diff(y), fxyz.diff(z)])\n assert fxyz.diff(([x, y, z], 2)) == Array([\n [fxyz.diff(x, 2), fxyz.diff(x, y), fxyz.diff(x, z)],\n [fxyz.diff(x, y), fxyz.diff(y, 2), fxyz.diff(y, z)],\n [fxyz.diff(x, z), fxyz.diff(z, y), fxyz.diff(z, 2)],\n ])\n\n expr = sin(x)*exp(y)\n assert expr.diff([[x, y]]) == Array([cos(x)*exp(y), sin(x)*exp(y)])\n assert expr.diff(y, ((x, y),)) == Array([cos(x)*exp(y), sin(x)*exp(y)])\n assert expr.diff(x, ((x, y),)) == Array([-sin(x)*exp(y), cos(x)*exp(y)])\n assert expr.diff(((y, x),), [[x, y]]) == Array([[cos(x)*exp(y), -sin(x)*exp(y)], [sin(x)*exp(y), cos(x)*exp(y)]])\n\n # Test different notations:\n\n fxyz.diff(x).diff(y).diff(x) == fxyz.diff(((x, y, z),), 3)[0, 1, 0]\n fxyz.diff(z).diff(y).diff(x) == fxyz.diff(((x, y, z),), 3)[2, 1, 0]\n fxyz.diff([[x, y, z]], ((z, y, x),)) == Array([[fxyz.diff(i).diff(j) for i in (x, y, z)] for j in (z, y, x)])\n\n # Test scalar derived by matrix remains matrix:\n res = x.diff(Matrix([[x, y]]))\n assert isinstance(res, ImmutableDenseMatrix)\n assert res == Matrix([[1, 0]])\n res = (x**3).diff(Matrix([[x, y]]))\n assert isinstance(res, ImmutableDenseMatrix)\n assert res == Matrix([[3*x**2, 0]])\n\n\ndef test_getattr():\n A = Matrix(((1, 4, x), (y, 2, 4), (10, 5, x**2 + 1)))\n raises(AttributeError, lambda: A.nonexistantattribute)\n assert getattr(A, 'diff')(x) == Matrix(((0, 0, 1), (0, 0, 0), (0, 0, 2*x)))\n\n\ndef test_hessenberg():\n A = Matrix([[3, 4, 1], [2, 4, 5], [0, 1, 2]])\n assert A.is_upper_hessenberg\n A = A.T\n assert A.is_lower_hessenberg\n A[0, -1] = 1\n assert A.is_lower_hessenberg is False\n\n A = Matrix([[3, 4, 1], [2, 4, 5], [3, 1, 2]])\n assert not A.is_upper_hessenberg\n\n A = zeros(5, 2)\n assert A.is_upper_hessenberg\n\n\ndef test_cholesky():\n raises(NonSquareMatrixError, lambda: 
Matrix((1, 2)).cholesky())\n raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).cholesky())\n raises(ValueError, lambda: Matrix(((5 + I, 0), (0, 1))).cholesky())\n raises(ValueError, lambda: Matrix(((1, 5), (5, 1))).cholesky())\n raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).cholesky(hermitian=False))\n assert Matrix(((5 + I, 0), (0, 1))).cholesky(hermitian=False) == Matrix([\n [sqrt(5 + I), 0], [0, 1]])\n A = Matrix(((1, 5), (5, 1)))\n L = A.cholesky(hermitian=False)\n assert L == Matrix([[1, 0], [5, 2*sqrt(6)*I]])\n assert L*L.T == A\n A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))\n L = A.cholesky()\n assert L * L.T == A\n assert L.is_lower\n assert L == Matrix([[5, 0, 0], [3, 3, 0], [-1, 1, 3]])\n A = Matrix(((4, -2*I, 2 + 2*I), (2*I, 2, -1 + I), (2 - 2*I, -1 - I, 11)))\n assert A.cholesky() == Matrix(((2, 0, 0), (I, 1, 0), (1 - I, 0, 3)))\n\n\ndef test_LDLdecomposition():\n raises(NonSquareMatrixError, lambda: Matrix((1, 2)).LDLdecomposition())\n raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).LDLdecomposition())\n raises(ValueError, lambda: Matrix(((5 + I, 0), (0, 1))).LDLdecomposition())\n raises(ValueError, lambda: Matrix(((1, 5), (5, 1))).LDLdecomposition())\n raises(ValueError, lambda: Matrix(((1, 2), (3, 4))).LDLdecomposition(hermitian=False))\n A = Matrix(((1, 5), (5, 1)))\n L, D = A.LDLdecomposition(hermitian=False)\n assert L * D * L.T == A\n A = Matrix(((25, 15, -5), (15, 18, 0), (-5, 0, 11)))\n L, D = A.LDLdecomposition()\n assert L * D * L.T == A\n assert L.is_lower\n assert L == Matrix([[1, 0, 0], [ S(3)/5, 1, 0], [S(-1)/5, S(1)/3, 1]])\n assert D.is_diagonal()\n assert D == Matrix([[25, 0, 0], [0, 9, 0], [0, 0, 9]])\n A = Matrix(((4, -2*I, 2 + 2*I), (2*I, 2, -1 + I), (2 - 2*I, -1 - I, 11)))\n L, D = A.LDLdecomposition()\n assert expand_mul(L * D * L.H) == A\n assert L == Matrix(((1, 0, 0), (I/2, 1, 0), (S(1)/2 - I/2, 0, 1)))\n assert D == Matrix(((4, 0, 0), (0, 1, 0), (0, 0, 9)))\n\n\ndef test_cholesky_solve():\n A = Matrix([[2, 
3, 5],\n [3, 6, 2],\n [8, 3, 6]])\n x = Matrix(3, 1, [3, 7, 5])\n b = A*x\n soln = A.cholesky_solve(b)\n assert soln == x\n A = Matrix([[0, -1, 2],\n [5, 10, 7],\n [8, 3, 4]])\n x = Matrix(3, 1, [-1, 2, 5])\n b = A*x\n soln = A.cholesky_solve(b)\n assert soln == x\n A = Matrix(((1, 5), (5, 1)))\n x = Matrix((4, -3))\n b = A*x\n soln = A.cholesky_solve(b)\n assert soln == x\n A = Matrix(((9, 3*I), (-3*I, 5)))\n x = Matrix((-2, 1))\n b = A*x\n soln = A.cholesky_solve(b)\n assert expand_mul(soln) == x\n A = Matrix(((9*I, 3), (-3 + I, 5)))\n x = Matrix((2 + 3*I, -1))\n b = A*x\n soln = A.cholesky_solve(b)\n assert expand_mul(soln) == x\n a00, a01, a11, b0, b1 = symbols('a00, a01, a11, b0, b1')\n A = Matrix(((a00, a01), (a01, a11)))\n b = Matrix((b0, b1))\n x = A.cholesky_solve(b)\n assert simplify(A*x) == b\n\n\ndef test_LDLsolve():\n A = Matrix([[2, 3, 5],\n [3, 6, 2],\n [8, 3, 6]])\n x = Matrix(3, 1, [3, 7, 5])\n b = A*x\n soln = A.LDLsolve(b)\n assert soln == x\n A = Matrix([[0, -1, 2],\n [5, 10, 7],\n [8, 3, 4]])\n x = Matrix(3, 1, [-1, 2, 5])\n b = A*x\n soln = A.LDLsolve(b)\n assert soln == x\n A = Matrix(((9, 3*I), (-3*I, 5)))\n x = Matrix((-2, 1))\n b = A*x\n soln = A.LDLsolve(b)\n assert expand_mul(soln) == x\n A = Matrix(((9*I, 3), (-3 + I, 5)))\n x = Matrix((2 + 3*I, -1))\n b = A*x\n soln = A.cholesky_solve(b)\n assert expand_mul(soln) == x\n\n\ndef test_lower_triangular_solve():\n\n raises(NonSquareMatrixError,\n lambda: Matrix([1, 0]).lower_triangular_solve(Matrix([0, 1])))\n raises(ShapeError,\n lambda: Matrix([[1, 0], [0, 1]]).lower_triangular_solve(Matrix([1])))\n raises(ValueError,\n lambda: Matrix([[2, 1], [1, 2]]).lower_triangular_solve(\n Matrix([[1, 0], [0, 1]])))\n\n A = Matrix([[1, 0], [0, 1]])\n B = Matrix([[x, y], [y, x]])\n C = Matrix([[4, 8], [2, 9]])\n\n assert A.lower_triangular_solve(B) == B\n assert A.lower_triangular_solve(C) == C\n\n\ndef test_upper_triangular_solve():\n\n raises(NonSquareMatrixError,\n lambda: Matrix([1, 
0]).upper_triangular_solve(Matrix([0, 1])))\n raises(TypeError,\n lambda: Matrix([[1, 0], [0, 1]]).upper_triangular_solve(Matrix([1])))\n raises(TypeError,\n lambda: Matrix([[2, 1], [1, 2]]).upper_triangular_solve(\n Matrix([[1, 0], [0, 1]])))\n\n A = Matrix([[1, 0], [0, 1]])\n B = Matrix([[x, y], [y, x]])\n C = Matrix([[2, 4], [3, 8]])\n\n assert A.upper_triangular_solve(B) == B\n assert A.upper_triangular_solve(C) == C\n\n\ndef test_diagonal_solve():\n raises(TypeError, lambda: Matrix([1, 1]).diagonal_solve(Matrix([1])))\n A = Matrix([[1, 0], [0, 1]])*2\n B = Matrix([[x, y], [y, x]])\n assert A.diagonal_solve(B) == B/2\n\n\ndef test_matrix_norm():\n # Vector Tests\n # Test columns and symbols\n x = Symbol('x', real=True)\n v = Matrix([cos(x), sin(x)])\n assert trigsimp(v.norm(2)) == 1\n assert v.norm(10) == Pow(cos(x)**10 + sin(x)**10, S(1)/10)\n\n # Test Rows\n A = Matrix([[5, Rational(3, 2)]])\n assert A.norm() == Pow(25 + Rational(9, 4), S(1)/2)\n assert A.norm(oo) == max(A._mat)\n assert A.norm(-oo) == min(A._mat)\n\n # Matrix Tests\n # Intuitive test\n A = Matrix([[1, 1], [1, 1]])\n assert A.norm(2) == 2\n assert A.norm(-2) == 0\n assert A.norm('frobenius') == 2\n assert eye(10).norm(2) == eye(10).norm(-2) == 1\n assert A.norm(oo) == 2\n\n # Test with Symbols and more complex entries\n A = Matrix([[3, y, y], [x, S(1)/2, -pi]])\n assert (A.norm('fro')\n == sqrt(S(37)/4 + 2*abs(y)**2 + pi**2 + x**2))\n\n # Check non-square\n A = Matrix([[1, 2, -3], [4, 5, Rational(13, 2)]])\n assert A.norm(2) == sqrt(S(389)/8 + sqrt(78665)/8)\n assert A.norm(-2) == S(0)\n assert A.norm('frobenius') == sqrt(389)/2\n\n # Test properties of matrix norms\n # https://en.wikipedia.org/wiki/Matrix_norm#Definition\n # Two matrices\n A = Matrix([[1, 2], [3, 4]])\n B = Matrix([[5, 5], [-2, 2]])\n C = Matrix([[0, -I], [I, 0]])\n D = Matrix([[1, 0], [0, -1]])\n L = [A, B, C, D]\n alpha = Symbol('alpha', real=True)\n\n for order in ['fro', 2, -2]:\n # Zero Check\n assert 
zeros(3).norm(order) == S(0)\n # Check Triangle Inequality for all Pairs of Matrices\n for X in L:\n for Y in L:\n dif = (X.norm(order) + Y.norm(order) -\n (X + Y).norm(order))\n assert (dif >= 0)\n # Scalar multiplication linearity\n for M in [A, B, C, D]:\n dif = simplify((alpha*M).norm(order) -\n abs(alpha) * M.norm(order))\n assert dif == 0\n\n # Test Properties of Vector Norms\n # https://en.wikipedia.org/wiki/Vector_norm\n # Two column vectors\n a = Matrix([1, 1 - 1*I, -3])\n b = Matrix([S(1)/2, 1*I, 1])\n c = Matrix([-1, -1, -1])\n d = Matrix([3, 2, I])\n e = Matrix([Integer(1e2), Rational(1, 1e2), 1])\n L = [a, b, c, d, e]\n alpha = Symbol('alpha', real=True)\n\n for order in [1, 2, -1, -2, S.Infinity, S.NegativeInfinity, pi]:\n # Zero Check\n if order > 0:\n assert Matrix([0, 0, 0]).norm(order) == S(0)\n # Triangle inequality on all pairs\n if order >= 1: # Triangle InEq holds only for these norms\n for X in L:\n for Y in L:\n dif = (X.norm(order) + Y.norm(order) -\n (X + Y).norm(order))\n assert simplify(dif >= 0) is S.true\n # Linear to scalar multiplication\n if order in [1, 2, -1, -2, S.Infinity, S.NegativeInfinity]:\n for X in L:\n dif = simplify((alpha*X).norm(order) -\n (abs(alpha) * X.norm(order)))\n assert dif == 0\n\n # ord=1\n M = Matrix(3, 3, [1, 3, 0, -2, -1, 0, 3, 9, 6])\n assert M.norm(1) == 13\n\n\ndef test_condition_number():\n x = Symbol('x', real=True)\n A = eye(3)\n A[0, 0] = 10\n A[2, 2] = S(1)/10\n assert A.condition_number() == 100\n\n A[1, 1] = x\n assert A.condition_number() == Max(10, Abs(x)) / Min(S(1)/10, Abs(x))\n\n M = Matrix([[cos(x), sin(x)], [-sin(x), cos(x)]])\n Mc = M.condition_number()\n assert all(Float(1.).epsilon_eq(Mc.subs(x, val).evalf()) for val in\n [Rational(1, 5), Rational(1, 2), Rational(1, 10), pi/2, pi, 7*pi/4 ])\n\n #issue 10782\n assert Matrix([]).condition_number() == 0\n\n\ndef test_equality():\n A = Matrix(((1, 2, 3), (4, 5, 6), (7, 8, 9)))\n B = Matrix(((9, 8, 7), (6, 5, 4), (3, 2, 1)))\n assert A == 
A[:, :]\n assert not A != A[:, :]\n assert not A == B\n assert A != B\n assert A != 10\n assert not A == 10\n\n # A SparseMatrix can be equal to a Matrix\n C = SparseMatrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))\n D = Matrix(((1, 0, 0), (0, 1, 0), (0, 0, 1)))\n assert C == D\n assert not C != D\n\n\ndef test_col_join():\n assert eye(3).col_join(Matrix([[7, 7, 7]])) == \\\n Matrix([[1, 0, 0],\n [0, 1, 0],\n [0, 0, 1],\n [7, 7, 7]])\n\n\ndef test_row_insert():\n r4 = Matrix([[4, 4, 4]])\n for i in range(-4, 5):\n l = [1, 0, 0]\n l.insert(i, 4)\n assert flatten(eye(3).row_insert(i, r4).col(0).tolist()) == l\n\n\ndef test_col_insert():\n c4 = Matrix([4, 4, 4])\n for i in range(-4, 5):\n l = [0, 0, 0]\n l.insert(i, 4)\n assert flatten(zeros(3).col_insert(i, c4).row(0).tolist()) == l\n\n\ndef test_normalized():\n assert Matrix([3, 4]).normalized() == \\\n Matrix([Rational(3, 5), Rational(4, 5)])\n\n # Zero vector trivial cases\n assert Matrix([0, 0, 0]).normalized() == Matrix([0, 0, 0])\n\n # Machine precision error truncation trivial cases\n m = Matrix([0,0,1.e-100])\n assert m.normalized(\n iszerofunc=lambda x: x.evalf(n=10, chop=True).is_zero\n ) == Matrix([0, 0, 0])\n\n\ndef test_print_nonzero():\n assert capture(lambda: eye(3).print_nonzero()) == \\\n '[X ]\\n[ X ]\\n[ X]\\n'\n assert capture(lambda: eye(3).print_nonzero('.')) == \\\n '[. ]\\n[ . 
]\\n[ .]\\n'\n\n\ndef test_zeros_eye():\n assert Matrix.eye(3) == eye(3)\n assert Matrix.zeros(3) == zeros(3)\n assert ones(3, 4) == Matrix(3, 4, [1]*12)\n\n i = Matrix([[1, 0], [0, 1]])\n z = Matrix([[0, 0], [0, 0]])\n for cls in classes:\n m = cls.eye(2)\n assert i == m # but m == i will fail if m is immutable\n assert i == eye(2, cls=cls)\n assert type(m) == cls\n m = cls.zeros(2)\n assert z == m\n assert z == zeros(2, cls=cls)\n assert type(m) == cls\n\n\ndef test_is_zero():\n assert Matrix().is_zero\n assert Matrix([[0, 0], [0, 0]]).is_zero\n assert zeros(3, 4).is_zero\n assert not eye(3).is_zero\n assert Matrix([[x, 0], [0, 0]]).is_zero == None\n assert SparseMatrix([[x, 0], [0, 0]]).is_zero == None\n assert ImmutableMatrix([[x, 0], [0, 0]]).is_zero == None\n assert ImmutableSparseMatrix([[x, 0], [0, 0]]).is_zero == None\n assert Matrix([[x, 1], [0, 0]]).is_zero == False\n a = Symbol('a', nonzero=True)\n assert Matrix([[a, 0], [0, 0]]).is_zero == False\n\n\ndef test_rotation_matrices():\n # This tests the rotation matrices by rotating about an axis and back.\n theta = pi/3\n r3_plus = rot_axis3(theta)\n r3_minus = rot_axis3(-theta)\n r2_plus = rot_axis2(theta)\n r2_minus = rot_axis2(-theta)\n r1_plus = rot_axis1(theta)\n r1_minus = rot_axis1(-theta)\n assert r3_minus*r3_plus*eye(3) == eye(3)\n assert r2_minus*r2_plus*eye(3) == eye(3)\n assert r1_minus*r1_plus*eye(3) == eye(3)\n\n # Check the correctness of the trace of the rotation matrix\n assert r1_plus.trace() == 1 + 2*cos(theta)\n assert r2_plus.trace() == 1 + 2*cos(theta)\n assert r3_plus.trace() == 1 + 2*cos(theta)\n\n # Check that a rotation with zero angle doesn't change anything.\n assert rot_axis1(0) == eye(3)\n assert rot_axis2(0) == eye(3)\n assert rot_axis3(0) == eye(3)\n\n\ndef test_DeferredVector():\n assert str(DeferredVector(\"vector\")[4]) == \"vector[4]\"\n assert sympify(DeferredVector(\"d\")) == DeferredVector(\"d\")\n\ndef test_DeferredVector_not_iterable():\n assert not 
iterable(DeferredVector('X'))\n\ndef test_DeferredVector_Matrix():\n raises(TypeError, lambda: Matrix(DeferredVector(\"V\")))\n\ndef test_GramSchmidt():\n R = Rational\n m1 = Matrix(1, 2, [1, 2])\n m2 = Matrix(1, 2, [2, 3])\n assert GramSchmidt([m1, m2]) == \\\n [Matrix(1, 2, [1, 2]), Matrix(1, 2, [R(2)/5, R(-1)/5])]\n assert GramSchmidt([m1.T, m2.T]) == \\\n [Matrix(2, 1, [1, 2]), Matrix(2, 1, [R(2)/5, R(-1)/5])]\n # from wikipedia\n assert GramSchmidt([Matrix([3, 1]), Matrix([2, 2])], True) == [\n Matrix([3*sqrt(10)/10, sqrt(10)/10]),\n Matrix([-sqrt(10)/10, 3*sqrt(10)/10])]\n\n\ndef test_casoratian():\n assert casoratian([1, 2, 3, 4], 1) == 0\n assert casoratian([1, 2, 3, 4], 1, zero=False) == 0\n\n\ndef test_zero_dimension_multiply():\n assert (Matrix()*zeros(0, 3)).shape == (0, 3)\n assert zeros(3, 0)*zeros(0, 3) == zeros(3, 3)\n assert zeros(0, 3)*zeros(3, 0) == Matrix()\n\n\ndef test_slice_issue_2884():\n m = Matrix(2, 2, range(4))\n assert m[1, :] == Matrix([[2, 3]])\n assert m[-1, :] == Matrix([[2, 3]])\n assert m[:, 1] == Matrix([[1, 3]]).T\n assert m[:, -1] == Matrix([[1, 3]]).T\n raises(IndexError, lambda: m[2, :])\n raises(IndexError, lambda: m[2, 2])\n\n\ndef test_slice_issue_3401():\n assert zeros(0, 3)[:, -1].shape == (0, 1)\n assert zeros(3, 0)[0, :] == Matrix(1, 0, [])\n\n\ndef test_copyin():\n s = zeros(3, 3)\n s[3] = 1\n assert s[:, 0] == Matrix([0, 1, 0])\n assert s[3] == 1\n assert s[3: 4] == [1]\n s[1, 1] = 42\n assert s[1, 1] == 42\n assert s[1, 1:] == Matrix([[42, 0]])\n s[1, 1:] = Matrix([[5, 6]])\n assert s[1, :] == Matrix([[1, 5, 6]])\n s[1, 1:] = [[42, 43]]\n assert s[1, :] == Matrix([[1, 42, 43]])\n s[0, 0] = 17\n assert s[:, :1] == Matrix([17, 1, 0])\n s[0, 0] = [1, 1, 1]\n assert s[:, 0] == Matrix([1, 1, 1])\n s[0, 0] = Matrix([1, 1, 1])\n assert s[:, 0] == Matrix([1, 1, 1])\n s[0, 0] = SparseMatrix([1, 1, 1])\n assert s[:, 0] == Matrix([1, 1, 1])\n\n\ndef test_invertible_check():\n # sometimes a singular matrix will have a pivot 
vector shorter than\n # the number of rows in a matrix...\n assert Matrix([[1, 2], [1, 2]]).rref() == (Matrix([[1, 2], [0, 0]]), (0,))\n raises(ValueError, lambda: Matrix([[1, 2], [1, 2]]).inv())\n m = Matrix([\n [-1, -1, 0],\n [ x, 1, 1],\n [ 1, x, -1],\n ])\n assert len(m.rref()[1]) != m.rows\n # in addition, unless simplify=True in the call to rref, the identity\n # matrix will be returned even though m is not invertible\n assert m.rref()[0] != eye(3)\n assert m.rref(simplify=signsimp)[0] != eye(3)\n raises(ValueError, lambda: m.inv(method=\"ADJ\"))\n raises(ValueError, lambda: m.inv(method=\"GE\"))\n raises(ValueError, lambda: m.inv(method=\"LU\"))\n\n\n@XFAIL\ndef test_issue_3959():\n x, y = symbols('x, y')\n e = x*y\n assert e.subs(x, Matrix([3, 5, 3])) == Matrix([3, 5, 3])*y\n\n\ndef test_issue_5964():\n assert str(Matrix([[1, 2], [3, 4]])) == 'Matrix([[1, 2], [3, 4]])'\n\n\ndef test_issue_7604():\n x, y = symbols(u\"x y\")\n assert sstr(Matrix([[x, 2*y], [y**2, x + 3]])) == \\\n 'Matrix([\\n[ x, 2*y],\\n[y**2, x + 3]])'\n\n\ndef test_is_Identity():\n assert eye(3).is_Identity\n assert eye(3).as_immutable().is_Identity\n assert not zeros(3).is_Identity\n assert not ones(3).is_Identity\n # issue 6242\n assert not Matrix([[1, 0, 0]]).is_Identity\n # issue 8854\n assert SparseMatrix(3,3, {(0,0):1, (1,1):1, (2,2):1}).is_Identity\n assert not SparseMatrix(2,3, range(6)).is_Identity\n assert not SparseMatrix(3,3, {(0,0):1, (1,1):1}).is_Identity\n assert not SparseMatrix(3,3, {(0,0):1, (1,1):1, (2,2):1, (0,1):2, (0,2):3}).is_Identity\n\n\ndef test_dot():\n assert ones(1, 3).dot(ones(3, 1)) == 3\n assert ones(1, 3).dot([1, 1, 1]) == 3\n assert Matrix([1, 2, 3]).dot(Matrix([1, 2, 3])) == 14\n assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I])) == -5 + I\n assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I]), hermitian=False) == -5 + I\n assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I]), hermitian=True) == 13 + I\n assert Matrix([1, 2, 3*I]).dot(Matrix([I, 2, 3*I]), 
hermitian=True, conjugate_convention=\"physics\") == 13 - I\n assert Matrix([1, 2, 3*I]).dot(Matrix([4, 5*I, 6]), hermitian=True, conjugate_convention=\"right\") == 4 + 8*I\n assert Matrix([1, 2, 3*I]).dot(Matrix([4, 5*I, 6]), hermitian=True, conjugate_convention=\"left\") == 4 - 8*I\n assert Matrix([I, 2*I]).dot(Matrix([I, 2*I]), hermitian=False, conjugate_convention=\"left\") == -5\n assert Matrix([I, 2*I]).dot(Matrix([I, 2*I]), conjugate_convention=\"left\") == 5\n\n\ndef test_dual():\n B_x, B_y, B_z, E_x, E_y, E_z = symbols(\n 'B_x B_y B_z E_x E_y E_z', real=True)\n F = Matrix((\n ( 0, E_x, E_y, E_z),\n (-E_x, 0, B_z, -B_y),\n (-E_y, -B_z, 0, B_x),\n (-E_z, B_y, -B_x, 0)\n ))\n Fd = Matrix((\n ( 0, -B_x, -B_y, -B_z),\n (B_x, 0, E_z, -E_y),\n (B_y, -E_z, 0, E_x),\n (B_z, E_y, -E_x, 0)\n ))\n assert F.dual().equals(Fd)\n assert eye(3).dual().equals(zeros(3))\n assert F.dual().dual().equals(-F)\n\n\ndef test_anti_symmetric():\n assert Matrix([1, 2]).is_anti_symmetric() is False\n m = Matrix(3, 3, [0, x**2 + 2*x + 1, y, -(x + 1)**2, 0, x*y, -y, -x*y, 0])\n assert m.is_anti_symmetric() is True\n assert m.is_anti_symmetric(simplify=False) is False\n assert m.is_anti_symmetric(simplify=lambda x: x) is False\n\n # tweak to fail\n m[2, 1] = -m[2, 1]\n assert m.is_anti_symmetric() is False\n # untweak\n m[2, 1] = -m[2, 1]\n\n m = m.expand()\n assert m.is_anti_symmetric(simplify=False) is True\n m[0, 0] = 1\n assert m.is_anti_symmetric() is False\n\n\ndef test_normalize_sort_diogonalization():\n A = Matrix(((1, 2), (2, 1)))\n P, Q = A.diagonalize(normalize=True)\n assert P*P.T == P.T*P == eye(P.cols)\n P, Q = A.diagonalize(normalize=True, sort=True)\n assert P*P.T == P.T*P == eye(P.cols)\n assert P*Q*P.inv() == A\n\n\ndef test_issue_5321():\n raises(ValueError, lambda: Matrix([[1, 2, 3], Matrix(0, 1, [])]))\n\n\ndef test_issue_5320():\n assert Matrix.hstack(eye(2), 2*eye(2)) == Matrix([\n [1, 0, 2, 0],\n [0, 1, 0, 2]\n ])\n assert Matrix.vstack(eye(2), 2*eye(2)) == 
Matrix([\n [1, 0],\n [0, 1],\n [2, 0],\n [0, 2]\n ])\n cls = SparseMatrix\n assert cls.hstack(cls(eye(2)), cls(2*eye(2))) == Matrix([\n [1, 0, 2, 0],\n [0, 1, 0, 2]\n ])\n\ndef test_issue_11944():\n A = Matrix([[1]])\n AIm = sympify(A)\n assert Matrix.hstack(AIm, A) == Matrix([[1, 1]])\n assert Matrix.vstack(AIm, A) == Matrix([[1], [1]])\n\ndef test_cross():\n a = [1, 2, 3]\n b = [3, 4, 5]\n col = Matrix([-2, 4, -2])\n row = col.T\n\n def test(M, ans):\n assert ans == M\n assert type(M) == cls\n for cls in classes:\n A = cls(a)\n B = cls(b)\n test(A.cross(B), col)\n test(A.cross(B.T), col)\n test(A.T.cross(B.T), row)\n test(A.T.cross(B), row)\n raises(ShapeError, lambda:\n Matrix(1, 2, [1, 1]).cross(Matrix(1, 2, [1, 1])))\n\n\ndef test_hash():\n for cls in classes[-2:]:\n s = {cls.eye(1), cls.eye(1)}\n assert len(s) == 1 and s.pop() == cls.eye(1)\n # issue 3979\n for cls in classes[:2]:\n assert not isinstance(cls.eye(1), Hashable)\n\n\n@XFAIL\ndef test_issue_3979():\n # when this passes, delete this and change the [1:2]\n # to [:2] in the test_hash above for issue 3979\n cls = classes[0]\n raises(AttributeError, lambda: hash(cls.eye(1)))\n\n\ndef test_adjoint():\n dat = [[0, I], [1, 0]]\n ans = Matrix([[0, 1], [-I, 0]])\n for cls in classes:\n assert ans == cls(dat).adjoint()\n\ndef test_simplify_immutable():\n from sympy import simplify, sin, cos\n assert simplify(ImmutableMatrix([[sin(x)**2 + cos(x)**2]])) == \\\n ImmutableMatrix([[1]])\n\ndef test_rank():\n from sympy.abc import x\n m = Matrix([[1, 2], [x, 1 - 1/x]])\n assert m.rank() == 2\n n = Matrix(3, 3, range(1, 10))\n assert n.rank() == 2\n p = zeros(3)\n assert p.rank() == 0\n\ndef test_issue_11434():\n ax, ay, bx, by, cx, cy, dx, dy, ex, ey, t0, t1 = \\\n symbols('a_x a_y b_x b_y c_x c_y d_x d_y e_x e_y t_0 t_1')\n M = Matrix([[ax, ay, ax*t0, ay*t0, 0],\n [bx, by, bx*t0, by*t0, 0],\n [cx, cy, cx*t0, cy*t0, 1],\n [dx, dy, dx*t0, dy*t0, 1],\n [ex, ey, 2*ex*t1 - ex*t0, 2*ey*t1 - ey*t0, 0]])\n assert 
M.rank() == 4\n\ndef test_rank_regression_from_so():\n # see:\n # https://stackoverflow.com/questions/19072700/why-does-sympy-give-me-the-wrong-answer-when-i-row-reduce-a-symbolic-matrix\n\n nu, lamb = symbols('nu, lambda')\n A = Matrix([[-3*nu, 1, 0, 0],\n [ 3*nu, -2*nu - 1, 2, 0],\n [ 0, 2*nu, (-1*nu) - lamb - 2, 3],\n [ 0, 0, nu + lamb, -3]])\n expected_reduced = Matrix([[1, 0, 0, 1/(nu**2*(-lamb - nu))],\n [0, 1, 0, 3/(nu*(-lamb - nu))],\n [0, 0, 1, 3/(-lamb - nu)],\n [0, 0, 0, 0]])\n expected_pivots = (0, 1, 2)\n\n reduced, pivots = A.rref()\n\n assert simplify(expected_reduced - reduced) == zeros(*A.shape)\n assert pivots == expected_pivots\n\ndef test_replace():\n from sympy import symbols, Function, Matrix\n F, G = symbols('F, G', cls=Function)\n K = Matrix(2, 2, lambda i, j: G(i+j))\n M = Matrix(2, 2, lambda i, j: F(i+j))\n N = M.replace(F, G)\n assert N == K\n\ndef test_replace_map():\n from sympy import symbols, Function, Matrix\n F, G = symbols('F, G', cls=Function)\n K = Matrix(2, 2, [(G(0), {F(0): G(0)}), (G(1), {F(1): G(1)}), (G(1), {F(1)\\\n : G(1)}), (G(2), {F(2): G(2)})])\n M = Matrix(2, 2, lambda i, j: F(i+j))\n N = M.replace(F, G, True)\n assert N == K\n\ndef test_atoms():\n m = Matrix([[1, 2], [x, 1 - 1/x]])\n assert m.atoms() == {S(1),S(2),S(-1), x}\n assert m.atoms(Symbol) == {x}\n\n@slow\ndef test_pinv():\n # Pseudoinverse of an invertible matrix is the inverse.\n A1 = Matrix([[a, b], [c, d]])\n assert simplify(A1.pinv()) == simplify(A1.inv())\n # Test the four properties of the pseudoinverse for various matrices.\n As = [Matrix([[13, 104], [2212, 3], [-3, 5]]),\n Matrix([[1, 7, 9], [11, 17, 19]]),\n Matrix([a, b])]\n for A in As:\n A_pinv = A.pinv()\n AAp = A * A_pinv\n ApA = A_pinv * A\n assert simplify(AAp * A) == A\n assert simplify(ApA * A_pinv) == A_pinv\n assert AAp.H == AAp\n assert ApA.H == ApA\n\ndef test_pinv_solve():\n # Fully determined system (unique result, identical to other solvers).\n A = Matrix([[1, 5], [7, 9]])\n B = 
Matrix([12, 13])\n assert A.pinv_solve(B) == A.cholesky_solve(B)\n assert A.pinv_solve(B) == A.LDLsolve(B)\n assert A.pinv_solve(B) == Matrix([sympify('-43/26'), sympify('71/26')])\n assert A * A.pinv() * B == B\n # Fully determined, with two-dimensional B matrix.\n B = Matrix([[12, 13, 14], [15, 16, 17]])\n assert A.pinv_solve(B) == A.cholesky_solve(B)\n assert A.pinv_solve(B) == A.LDLsolve(B)\n assert A.pinv_solve(B) == Matrix([[-33, -37, -41], [69, 75, 81]]) / 26\n assert A * A.pinv() * B == B\n # Underdetermined system (infinite results).\n A = Matrix([[1, 0, 1], [0, 1, 1]])\n B = Matrix([5, 7])\n solution = A.pinv_solve(B)\n w = {}\n for s in solution.atoms(Symbol):\n # Extract dummy symbols used in the solution.\n w[s.name] = s\n assert solution == Matrix([[w['w0_0']/3 + w['w1_0']/3 - w['w2_0']/3 + 1],\n [w['w0_0']/3 + w['w1_0']/3 - w['w2_0']/3 + 3],\n [-w['w0_0']/3 - w['w1_0']/3 + w['w2_0']/3 + 4]])\n assert A * A.pinv() * B == B\n # Overdetermined system (least squares results).\n A = Matrix([[1, 0], [0, 0], [0, 1]])\n B = Matrix([3, 2, 1])\n assert A.pinv_solve(B) == Matrix([3, 1])\n # Proof the solution is not exact.\n assert A * A.pinv() * B != B\n\ndef test_pinv_rank_deficient():\n # Test the four properties of the pseudoinverse for various matrices.\n As = [Matrix([[1, 1, 1], [2, 2, 2]]),\n Matrix([[1, 0], [0, 0]]),\n Matrix([[1, 2], [2, 4], [3, 6]])]\n for A in As:\n A_pinv = A.pinv()\n AAp = A * A_pinv\n ApA = A_pinv * A\n assert simplify(AAp * A) == A\n assert simplify(ApA * A_pinv) == A_pinv\n assert AAp.H == AAp\n assert ApA.H == ApA\n # Test solving with rank-deficient matrices.\n A = Matrix([[1, 0], [0, 0]])\n # Exact, non-unique solution.\n B = Matrix([3, 0])\n solution = A.pinv_solve(B)\n w1 = solution.atoms(Symbol).pop()\n assert w1.name == 'w1_0'\n assert solution == Matrix([3, w1])\n assert A * A.pinv() * B == B\n # Least squares, non-unique solution.\n B = Matrix([3, 1])\n solution = A.pinv_solve(B)\n w1 = solution.atoms(Symbol).pop()\n 
assert w1.name == 'w1_0'\n assert solution == Matrix([3, w1])\n assert A * A.pinv() * B != B\n\n@XFAIL\ndef test_pinv_rank_deficient_when_diagonalization_fails():\n # Test the four properties of the pseudoinverse for matrices when\n # diagonalization of A.H*A fails.'\n As = [Matrix([\n [61, 89, 55, 20, 71, 0],\n [62, 96, 85, 85, 16, 0],\n [69, 56, 17, 4, 54, 0],\n [10, 54, 91, 41, 71, 0],\n [ 7, 30, 10, 48, 90, 0],\n [0,0,0,0,0,0]])]\n for A in As:\n A_pinv = A.pinv()\n AAp = A * A_pinv\n ApA = A_pinv * A\n assert simplify(AAp * A) == A\n assert simplify(ApA * A_pinv) == A_pinv\n assert AAp.H == AAp\n assert ApA.H == ApA\n\n\ndef test_gauss_jordan_solve():\n\n # Square, full rank, unique solution\n A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 10]])\n b = Matrix([3, 6, 9])\n sol, params = A.gauss_jordan_solve(b)\n assert sol == Matrix([[-1], [2], [0]])\n assert params == Matrix(0, 1, [])\n\n # Square, reduced rank, parametrized solution\n A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n b = Matrix([3, 6, 9])\n sol, params, freevar = A.gauss_jordan_solve(b, freevar=True)\n w = {}\n for s in sol.atoms(Symbol):\n # Extract dummy symbols used in the solution.\n w[s.name] = s\n assert sol == Matrix([[w['tau0'] - 1], [-2*w['tau0'] + 2], [w['tau0']]])\n assert params == Matrix([[w['tau0']]])\n assert freevar == [2]\n\n # Square, reduced rank, parametrized solution\n A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])\n b = Matrix([0, 0, 0])\n sol, params = A.gauss_jordan_solve(b)\n w = {}\n for s in sol.atoms(Symbol):\n w[s.name] = s\n assert sol == Matrix([[-2*w['tau0'] - 3*w['tau1']],\n [w['tau0']], [w['tau1']]])\n assert params == Matrix([[w['tau0']], [w['tau1']]])\n\n # Square, reduced rank, parametrized solution\n A = Matrix([[0, 0, 0], [0, 0, 0], [0, 0, 0]])\n b = Matrix([0, 0, 0])\n sol, params = A.gauss_jordan_solve(b)\n w = {}\n for s in sol.atoms(Symbol):\n w[s.name] = s\n assert sol == Matrix([[w['tau0']], [w['tau1']], [w['tau2']]])\n assert params == Matrix([[w['tau0']], 
[w['tau1']], [w['tau2']]])\n\n # Square, reduced rank, no solution\n A = Matrix([[1, 2, 3], [2, 4, 6], [3, 6, 9]])\n b = Matrix([0, 0, 1])\n raises(ValueError, lambda: A.gauss_jordan_solve(b))\n\n # Rectangular, tall, full rank, unique solution\n A = Matrix([[1, 5, 3], [2, 1, 6], [1, 7, 9], [1, 4, 3]])\n b = Matrix([0, 0, 1, 0])\n sol, params = A.gauss_jordan_solve(b)\n assert sol == Matrix([[-S(1)/2], [0], [S(1)/6]])\n assert params == Matrix(0, 1, [])\n\n # Rectangular, tall, full rank, no solution\n A = Matrix([[1, 5, 3], [2, 1, 6], [1, 7, 9], [1, 4, 3]])\n b = Matrix([0, 0, 0, 1])\n raises(ValueError, lambda: A.gauss_jordan_solve(b))\n\n # Rectangular, tall, reduced rank, parametrized solution\n A = Matrix([[1, 5, 3], [2, 10, 6], [3, 15, 9], [1, 4, 3]])\n b = Matrix([0, 0, 0, 1])\n sol, params = A.gauss_jordan_solve(b)\n w = {}\n for s in sol.atoms(Symbol):\n w[s.name] = s\n assert sol == Matrix([[-3*w['tau0'] + 5], [-1], [w['tau0']]])\n assert params == Matrix([[w['tau0']]])\n\n # Rectangular, tall, reduced rank, no solution\n A = Matrix([[1, 5, 3], [2, 10, 6], [3, 15, 9], [1, 4, 3]])\n b = Matrix([0, 0, 1, 1])\n raises(ValueError, lambda: A.gauss_jordan_solve(b))\n\n # Rectangular, wide, full rank, parametrized solution\n A = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 1, 12]])\n b = Matrix([1, 1, 1])\n sol, params = A.gauss_jordan_solve(b)\n w = {}\n for s in sol.atoms(Symbol):\n w[s.name] = s\n assert sol == Matrix([[2*w['tau0'] - 1], [-3*w['tau0'] + 1], [0],\n [w['tau0']]])\n assert params == Matrix([[w['tau0']]])\n\n # Rectangular, wide, reduced rank, parametrized solution\n A = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [2, 4, 6, 8]])\n b = Matrix([0, 1, 0])\n sol, params = A.gauss_jordan_solve(b)\n w = {}\n for s in sol.atoms(Symbol):\n w[s.name] = s\n assert sol == Matrix([[w['tau0'] + 2*w['tau1'] + 1/S(2)],\n [-2*w['tau0'] - 3*w['tau1'] - 1/S(4)],\n [w['tau0']], [w['tau1']]])\n assert params == Matrix([[w['tau0']], [w['tau1']]])\n # watch out for clashing 
symbols\n x0, x1, x2, _x0 = symbols('_tau0 _tau1 _tau2 tau1')\n M = Matrix([[0, 1, 0, 0, 0, 0], [0, 0, 0, 1, 0, _x0]])\n A = M[:, :-1]\n b = M[:, -1:]\n sol, params = A.gauss_jordan_solve(b)\n assert params == Matrix(3, 1, [x0, x1, x2])\n assert sol == Matrix(5, 1, [x1, 0, x0, _x0, x2])\n\n # Rectangular, wide, reduced rank, no solution\n A = Matrix([[1, 2, 3, 4], [5, 6, 7, 8], [2, 4, 6, 8]])\n b = Matrix([1, 1, 1])\n raises(ValueError, lambda: A.gauss_jordan_solve(b))\n\ndef test_solve():\n A = Matrix([[1,2], [2,4]])\n b = Matrix([[3], [4]])\n raises(ValueError, lambda: A.solve(b)) #no solution\n b = Matrix([[ 4], [8]])\n raises(ValueError, lambda: A.solve(b)) #infinite solution\n\ndef test_issue_7201():\n assert ones(0, 1) + ones(0, 1) == Matrix(0, 1, [])\n assert ones(1, 0) + ones(1, 0) == Matrix(1, 0, [])\n\ndef test_free_symbols():\n for M in ImmutableMatrix, ImmutableSparseMatrix, Matrix, SparseMatrix:\n assert M([[x], [0]]).free_symbols == {x}\n\ndef test_from_ndarray():\n \"\"\"See issue 7465.\"\"\"\n try:\n from numpy import array\n except ImportError:\n skip('NumPy must be available to test creating matrices from ndarrays')\n\n assert Matrix(array([1, 2, 3])) == Matrix([1, 2, 3])\n assert Matrix(array([[1, 2, 3]])) == Matrix([[1, 2, 3]])\n assert Matrix(array([[1, 2, 3], [4, 5, 6]])) == \\\n Matrix([[1, 2, 3], [4, 5, 6]])\n assert Matrix(array([x, y, z])) == Matrix([x, y, z])\n raises(NotImplementedError, lambda: Matrix(array([[\n [1, 2], [3, 4]], [[5, 6], [7, 8]]])))\n\ndef test_hermitian():\n a = Matrix([[1, I], [-I, 1]])\n assert a.is_hermitian\n a[0, 0] = 2*I\n assert a.is_hermitian is False\n a[0, 0] = x\n assert a.is_hermitian is None\n a[0, 1] = a[1, 0]*I\n assert a.is_hermitian is False\n\ndef test_doit():\n a = Matrix([[Add(x,x, evaluate=False)]])\n assert a[0] != 2*x\n assert a.doit() == Matrix([[2*x]])\n\ndef test_issue_9457_9467_9876():\n # for row_del(index)\n M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n M.row_del(1)\n assert M == 
Matrix([[1, 2, 3], [3, 4, 5]])\n N = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n N.row_del(-2)\n assert N == Matrix([[1, 2, 3], [3, 4, 5]])\n O = Matrix([[1, 2, 3], [5, 6, 7], [9, 10, 11]])\n O.row_del(-1)\n assert O == Matrix([[1, 2, 3], [5, 6, 7]])\n P = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n raises(IndexError, lambda: P.row_del(10))\n Q = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n raises(IndexError, lambda: Q.row_del(-10))\n\n # for col_del(index)\n M = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n M.col_del(1)\n assert M == Matrix([[1, 3], [2, 4], [3, 5]])\n N = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n N.col_del(-2)\n assert N == Matrix([[1, 3], [2, 4], [3, 5]])\n P = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n raises(IndexError, lambda: P.col_del(10))\n Q = Matrix([[1, 2, 3], [2, 3, 4], [3, 4, 5]])\n raises(IndexError, lambda: Q.col_del(-10))\n\ndef test_issue_9422():\n x, y = symbols('x y', commutative=False)\n a, b = symbols('a b')\n M = eye(2)\n M1 = Matrix(2, 2, [x, y, y, z])\n assert y*x*M != x*y*M\n assert b*a*M == a*b*M\n assert x*M1 != M1*x\n assert a*M1 == M1*a\n assert y*x*M == Matrix([[y*x, 0], [0, y*x]])\n\n\ndef test_issue_10770():\n M = Matrix([])\n a = ['col_insert', 'row_join'], Matrix([9, 6, 3])\n b = ['row_insert', 'col_join'], a[1].T\n c = ['row_insert', 'col_insert'], Matrix([[1, 2], [3, 4]])\n for ops, m in (a, b, c):\n for op in ops:\n f = getattr(M, op)\n new = f(m) if 'join' in op else f(42, m)\n assert new == m and id(new) != id(m)\n\n\ndef test_issue_10658():\n A = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n assert A.extract([0, 1, 2], [True, True, False]) == \\\n Matrix([[1, 2], [4, 5], [7, 8]])\n assert A.extract([0, 1, 2], [True, False, False]) == Matrix([[1], [4], [7]])\n assert A.extract([True, False, False], [0, 1, 2]) == Matrix([[1, 2, 3]])\n assert A.extract([True, False, True], [0, 1, 2]) == \\\n Matrix([[1, 2, 3], [7, 8, 9]])\n assert A.extract([0, 1, 2], [False, False, False]) == Matrix(3, 0, [])\n assert 
A.extract([False, False, False], [0, 1, 2]) == Matrix(0, 3, [])\n assert A.extract([True, False, True], [False, True, False]) == \\\n Matrix([[2], [8]])\n\ndef test_opportunistic_simplification():\n # this test relates to issue #10718, #9480, #11434\n\n # issue #9480\n m = Matrix([[-5 + 5*sqrt(2), -5], [-5*sqrt(2)/2 + 5, -5*sqrt(2)/2]])\n assert m.rank() == 1\n\n # issue #10781\n m = Matrix([[3+3*sqrt(3)*I, -9],[4,-3+3*sqrt(3)*I]])\n assert simplify(m.rref()[0] - Matrix([[1, -9/(3 + 3*sqrt(3)*I)], [0, 0]])) == zeros(2, 2)\n\n # issue #11434\n ax,ay,bx,by,cx,cy,dx,dy,ex,ey,t0,t1 = symbols('a_x a_y b_x b_y c_x c_y d_x d_y e_x e_y t_0 t_1')\n m = Matrix([[ax,ay,ax*t0,ay*t0,0],[bx,by,bx*t0,by*t0,0],[cx,cy,cx*t0,cy*t0,1],[dx,dy,dx*t0,dy*t0,1],[ex,ey,2*ex*t1-ex*t0,2*ey*t1-ey*t0,0]])\n assert m.rank() == 4\n\ndef test_partial_pivoting():\n # example from https://en.wikipedia.org/wiki/Pivot_element\n # partial pivoting with back subsitution gives a perfect result\n # naive pivoting give an error ~1e-13, so anything better than\n # 1e-15 is good\n mm=Matrix([[0.003 ,59.14, 59.17],[ 5.291, -6.13,46.78]])\n assert (mm.rref()[0] - Matrix([[1.0, 0, 10.0], [ 0, 1.0, 1.0]])).norm() < 1e-15\n\n # issue #11549\n m_mixed = Matrix([[6e-17, 1.0, 4],[ -1.0, 0, 8],[ 0, 0, 1]])\n m_float = Matrix([[6e-17, 1.0, 4.],[ -1.0, 0., 8.],[ 0., 0., 1.]])\n m_inv = Matrix([[ 0, -1.0, 8.0],[1.0, 6.0e-17, -4.0],[ 0, 0, 1]])\n # this example is numerically unstable and involves a matrix with a norm >= 8,\n # this comparing the difference of the results with 1e-15 is numerically sound.\n assert (m_mixed.inv() - m_inv).norm() < 1e-15\n assert (m_float.inv() - m_inv).norm() < 1e-15\n\ndef test_iszero_substitution():\n \"\"\" When doing numerical computations, all elements that pass\n the iszerofunc test should be set to numerically zero if they\n aren't already. 
\"\"\"\n\n # Matrix from issue #9060\n m = Matrix([[0.9, -0.1, -0.2, 0],[-0.8, 0.9, -0.4, 0],[-0.1, -0.8, 0.6, 0]])\n m_rref = m.rref(iszerofunc=lambda x: abs(x)<6e-15)[0]\n m_correct = Matrix([[1.0, 0, -0.301369863013699, 0],[ 0, 1.0, -0.712328767123288, 0],[ 0, 0, 0, 0]])\n m_diff = m_rref - m_correct\n assert m_diff.norm() < 1e-15\n # if a zero-substitution wasn't made, this entry will be -1.11022302462516e-16\n assert m_rref[2,2] == 0\n\n\n@slow\ndef test_issue_11238():\n from sympy import Point\n xx = 8*tan(13*pi/45)/(tan(13*pi/45) + sqrt(3))\n yy = (-8*sqrt(3)*tan(13*pi/45)**2 + 24*tan(13*pi/45))/(-3 + tan(13*pi/45)**2)\n p1 = Point(0, 0)\n p2 = Point(1, -sqrt(3))\n p0 = Point(xx,yy)\n m1 = Matrix([p1 - simplify(p0), p2 - simplify(p0)])\n m2 = Matrix([p1 - p0, p2 - p0])\n m3 = Matrix([simplify(p1 - p0), simplify(p2 - p0)])\n\n assert m1.rank(simplify=True) == 1\n assert m2.rank(simplify=True) == 1\n assert m3.rank(simplify=True) == 1\n\ndef test_as_real_imag():\n m1 = Matrix(2,2,[1,2,3,4])\n m2 = m1*S.ImaginaryUnit\n m3 = m1 + m2\n\n for kls in classes:\n a,b = kls(m3).as_real_imag()\n assert list(a) == list(m1)\n assert list(b) == list(m1)\n\ndef test_deprecated():\n # Maintain tests for deprecated functions. We must capture\n # the deprecation warnings. 
When the deprecated functionality is\n # removed, the corresponding tests should be removed.\n\n m = Matrix(3, 3, [0, 1, 0, -4, 4, 0, -2, 1, 2])\n P, Jcells = m.jordan_cells()\n assert Jcells[1] == Matrix(1, 1, [2])\n assert Jcells[0] == Matrix(2, 2, [2, 1, 0, 2])\n\n with warns_deprecated_sympy():\n assert Matrix([[1,2],[3,4]]).dot(Matrix([[1,3],[4,5]])) == [10, 19, 14, 28]\n\n\ndef test_issue_14489():\n from sympy import Mod\n A = Matrix([-1, 1, 2])\n B = Matrix([10, 20, -15])\n\n assert Mod(A, 3) == Matrix([2, 1, 2])\n assert Mod(B, 4) == Matrix([2, 0, 1])\n\ndef test_issue_14517():\n M = Matrix([\n [ 0, 10*I, 10*I, 0],\n [10*I, 0, 0, 10*I],\n [10*I, 0, 5 + 2*I, 10*I],\n [ 0, 10*I, 10*I, 5 + 2*I]])\n ev = M.eigenvals()\n # test one random eigenvalue, the computation is a little slow\n test_ev = random.choice(list(ev.keys()))\n assert (M - test_ev*eye(4)).det() == 0\n\ndef test_issue_14943():\n # Test that __array__ accepts the optional dtype argument\n try:\n from numpy import array\n except ImportError:\n skip('NumPy must be available to test creating matrices from ndarrays')\n\n M = Matrix([[1,2], [3,4]])\n assert array(M, dtype=float).dtype.name == 'float64'\n\ndef test_issue_8240():\n # Eigenvalues of large triangular matrices\n n = 200\n\n diagonal_variables = [Symbol('x%s' % i) for i in range(n)]\n M = [[0 for i in range(n)] for j in range(n)]\n for i in range(n):\n M[i][i] = diagonal_variables[i]\n M = Matrix(M)\n\n eigenvals = M.eigenvals()\n assert len(eigenvals) == n\n for i in range(n):\n assert eigenvals[diagonal_variables[i]] == 1\n\n eigenvals = M.eigenvals(multiple=True)\n assert set(eigenvals) == set(diagonal_variables)\n\n # with multiplicity\n M = Matrix([[x, 0, 0], [1, y, 0], [2, 3, x]])\n eigenvals = M.eigenvals()\n assert eigenvals == {x: 2, y: 1}\n\n eigenvals = M.eigenvals(multiple=True)\n assert len(eigenvals) == 3\n assert eigenvals.count(x) == 2\n assert eigenvals.count(y) == 1\n\ndef test_legacy_det():\n # Minimal support for legacy 
keys for 'method' in det()\n # Partially copied from test_determinant()\n\n M = Matrix(( ( 3, -2, 0, 5),\n (-2, 1, -2, 2),\n ( 0, -2, 5, 0),\n ( 5, 0, 3, 4) ))\n\n assert M.det(method=\"bareis\") == -289\n assert M.det(method=\"det_lu\") == -289\n assert M.det(method=\"det_LU\") == -289\n\n M = Matrix(( (3, 2, 0, 0, 0),\n (0, 3, 2, 0, 0),\n (0, 0, 3, 2, 0),\n (0, 0, 0, 3, 2),\n (2, 0, 0, 0, 3) ))\n\n assert M.det(method=\"bareis\") == 275\n assert M.det(method=\"det_lu\") == 275\n assert M.det(method=\"Bareis\") == 275\n\n M = Matrix(( (1, 0, 1, 2, 12),\n (2, 0, 1, 1, 4),\n (2, 1, 1, -1, 3),\n (3, 2, -1, 1, 8),\n (1, 1, 1, 0, 6) ))\n\n assert M.det(method=\"bareis\") == -55\n assert M.det(method=\"det_lu\") == -55\n assert M.det(method=\"BAREISS\") == -55\n\n M = Matrix(( (-5, 2, 3, 4, 5),\n ( 1, -4, 3, 4, 5),\n ( 1, 2, -3, 4, 5),\n ( 1, 2, 3, -2, 5),\n ( 1, 2, 3, 4, -1) ))\n\n assert M.det(method=\"bareis\") == 11664\n assert M.det(method=\"det_lu\") == 11664\n assert M.det(method=\"BERKOWITZ\") == 11664\n\n M = Matrix(( ( 2, 7, -1, 3, 2),\n ( 0, 0, 1, 0, 1),\n (-2, 0, 7, 0, 2),\n (-3, -2, 4, 5, 3),\n ( 1, 0, 0, 0, 1) ))\n\n assert M.det(method=\"bareis\") == 123\n assert M.det(method=\"det_lu\") == 123\n assert M.det(method=\"LU\") == 123\n\ndef test_case_6913():\n m = MatrixSymbol('m', 1, 1)\n a = Symbol(\"a\")\n a = m[0, 0]>0\n assert str(a) == 'm[0, 0] > 0'\n\ndef test_issue_15872():\n A = Matrix([[1, 1, 1, 0], [-2, -1, 0, -1], [0, 0, -1, -1], [0, 0, 2, 1]])\n B = A - Matrix.eye(4) * I\n assert B.rank() == 3\n assert (B**2).rank() == 2\n assert (B**3).rank() == 2\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
0h-n0/DL_benchmarks | [
"64ede26c0a9b0b30abdb22474f1af73d89449052"
] | [
"examples/nosacred_th.py"
] | [
"import sys\nimport time\nimport copy\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\n\n\nclass Iterator(object):\n def __init__(self, data_type, image_shape, sequence_shape, niteration,\n batch_size, label_size, target_type):\n self.data_type = data_type\n self.image_shape = image_shape\n self.sequence_shape = sequence_shape\n self.niteration = niteration\n self.batch_size = batch_size\n self.label_size = label_size\n self.target_type = target_type\n self._i = 0\n \n def __iter__(self):\n return self\n \n def __next__(self):\n if self._i == self.niteration:\n raise StopIteration()\n self._i += 1\n \n if self.data_type == 'image':\n ### data dimension = [batch, channel, height, width]\n dims = np.prod(self.image_shape)\n data = np.random.random(dims * self.batch_size)\n data = data.reshape(self.batch_size, *self.image_shape)\n ### target dimension = [batch]\n _target = np.random.randint(self.label_size, size=self.batch_size)\n if self.target_type == 'one-hot':\n target = np.zeros((self.batch_size, self.label_size))\n target[np.arange(self.batch_size), _target] = 1\n else:\n target = _target\n \n elif self.data_type == 'sequence':\n data = np.random.random()\n \n return (data, target)\n\n def __len__(self):\n return self.niteration\n\nclass CNN(nn.Module):\n def __init__(self, channel, xdim, ydim, output_num):\n super(CNN, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(channel, 180, (xdim, 3), stride=1),\n nn.ReLU(),\n nn.Conv2d(180, 180, (1, 3)),\n nn.ReLU(),\n nn.MaxPool2d((1, 2), stride=2, ceil_mode=True),\n nn.Conv2d(180, 180, (1, 3), stride=1),\n nn.ReLU(),\n nn.Conv2d(180, 180, (1, 3)),\n nn.ReLU(),\n nn.MaxPool2d((1, 2), stride=2, ceil_mode=True),\n nn.Conv2d(180, 180, (1, 3), stride=1),\n nn.ReLU(),\n nn.Conv2d(180, 180, (1, 3)),\n nn.ReLU())\n self.fc = nn.Sequential(\n nn.Linear(540, 2048),\n nn.ReLU(),\n nn.Linear(2048, 2048),\n 
nn.ReLU(),\n nn.Linear(2048, output_num))\n\n def forward(self, x):\n h = self.conv(x)\n h = h.view(len(h), -1)\n return self.fc(h)\n \nif __name__ == \"__main__\":\n ngpu = int(sys.argv[1])\n channel = 3\n xdim = 19\n ydim = 40\n output_num = 3000\n data_type = 'image'\n data_config = dict(\n image_shape = (3, 19, 40), # (channel, witdth, height)\n sequence_shape = 28, # feature\n niteration = 1000,\n batch_size = 2000,\n label_size = 3000,\n target_type = None\n )\n\n iterator = Iterator(data_type, **data_config)\n torch.backends.cudnn.benchmark = True\n \n gpus = [i for i in range(ngpu)]\n \n model = CNN(channel, xdim, ydim, output_num)\n optimizer = optim.SGD(model.parameters(),\n lr=0.1,\n momentum=0.9)\n\n model = torch.nn.DataParallel(model, device_ids=gpus)\n model.cuda()\n criterion = torch.nn.CrossEntropyLoss().cuda()\n \n model.train()\n \n for idx, (x, t) in enumerate(iterator):\n start = time.time()\n x = torch.FloatTensor(x)\n t = torch.LongTensor(t)\n x, t = Variable(x), Variable(t) \n x = x.cuda()\n t = t.cuda()\n\n x = model(x)\n\n optimizer.zero_grad()\n loss = criterion(x, t)\n loss.backward()\n optimizer.step()\n print(time.time() - start)\n"
] | [
[
"torch.LongTensor",
"torch.nn.CrossEntropyLoss",
"numpy.random.random",
"numpy.arange",
"torch.nn.ReLU",
"torch.nn.Conv2d",
"torch.autograd.Variable",
"torch.nn.MaxPool2d",
"torch.nn.Linear",
"torch.FloatTensor",
"numpy.prod",
"torch.nn.DataParallel",
"numpy.zeros",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vrmusketeers/highway-env | [
"9c2354b375d7a367239099417cfe8d79e0e97e40"
] | [
"highway_env/vehicle/uncertainty/prediction.py"
] | [
"import copy\nfrom typing import List, Tuple, Callable, Union, TYPE_CHECKING\nimport numpy as np\n\nfrom highway_env import utils\nfrom highway_env.interval import polytope, vector_interval_section, integrator_interval, \\\n interval_negative_part, intervals_diff, intervals_product, LPV, interval_absolute_to_local, \\\n interval_local_to_absolute\nfrom highway_env.road.road import Route, LaneIndex, Road\nfrom highway_env.utils import Vector\nfrom highway_env.vehicle.behavior import LinearVehicle\nfrom highway_env.vehicle.controller import MDPVehicle\nfrom highway_env.vehicle.kinematics import Vehicle\n\nif TYPE_CHECKING:\n from highway_env.vehicle.objects import RoadObject\n\nPolytope = Tuple[np.ndarray, List[np.ndarray]]\n\n\nclass IntervalVehicle(LinearVehicle):\n\n \"\"\"\n Estimator for the interval-membership of a LinearVehicle under parameter uncertainty.\n\n The model trajectory is stored in a model_vehicle, and the lower and upper bounds of the states are stored\n in a min_vehicle and max_vehicle. 
Note that these vehicles do not follow a proper Vehicle dynamics, and\n are only used for storage of the bounds.\n \"\"\"\n\n def __init__(self,\n road: Road,\n position: Vector,\n heading: float = 0,\n speed: float = 0,\n target_lane_index: LaneIndex = None,\n target_speed: float = None,\n route: Route = None,\n enable_lane_change: bool = True,\n timer: float = None,\n theta_a_i: List[List[float]] = None,\n theta_b_i: List[List[float]] = None,\n data: dict = None) -> None:\n \"\"\"\n :param theta_a_i: The interval of possible acceleration parameters\n :param theta_b_i: The interval of possible steering parameters\n \"\"\"\n super().__init__(road,\n position,\n heading,\n speed,\n target_lane_index,\n target_speed,\n route,\n enable_lane_change,\n timer)\n self.theta_a_i = theta_a_i if theta_a_i is not None else LinearVehicle.ACCELERATION_RANGE\n self.theta_b_i = theta_b_i if theta_b_i is not None else LinearVehicle.STEERING_RANGE\n self.data = data\n self.interval = VehicleInterval(self)\n self.trajectory = []\n self.interval_trajectory = []\n self.longitudinal_lpv, self.lateral_lpv = None, None\n self.previous_target_lane_index = self.target_lane_index\n\n @classmethod\n def create_from(cls, vehicle: LinearVehicle) -> \"IntervalVehicle\":\n v = cls(vehicle.road,\n vehicle.position,\n heading=vehicle.heading,\n speed=vehicle.speed,\n target_lane_index=getattr(vehicle, 'target_lane_index', None),\n target_speed=getattr(vehicle, 'target_speed', None),\n route=getattr(vehicle, 'route', None),\n timer=getattr(vehicle, 'timer', None),\n theta_a_i=getattr(vehicle, 'theta_a_i', None),\n theta_b_i=getattr(vehicle, 'theta_b_i', None),\n data=getattr(vehicle, \"data\", None))\n return v\n\n def step(self, dt: float, mode: str = \"partial\") -> None:\n self.store_trajectories()\n if self.crashed:\n self.interval = VehicleInterval(self)\n else:\n if mode == \"partial\":\n # self.observer_step(dt)\n self.partial_observer_step(dt)\n elif mode == \"predictor\":\n 
self.predictor_step(dt)\n super().step(dt)\n\n def observer_step(self, dt: float) -> None:\n \"\"\"\n Step the interval observer dynamics\n\n :param dt: timestep [s]\n \"\"\"\n # Input state intervals\n position_i = self.interval.position\n v_i = self.interval.speed\n psi_i = self.interval.heading\n\n # Features interval\n front_interval = self.get_front_interval()\n\n # Acceleration features\n phi_a_i = np.zeros((2, 3))\n phi_a_i[:, 0] = [0, 0]\n if front_interval:\n phi_a_i[:, 1] = interval_negative_part(\n intervals_diff(front_interval.speed, v_i))\n # Lane distance interval\n lane_psi = self.lane.heading_at(self.lane.local_coordinates(self.position)[0])\n lane_direction = [np.cos(lane_psi), np.sin(lane_psi)]\n diff_i = intervals_diff(front_interval.position, position_i)\n d_i = vector_interval_section(diff_i, lane_direction)\n\n d_safe_i = self.DISTANCE_WANTED + self.TIME_WANTED * v_i\n phi_a_i[:, 2] = interval_negative_part(intervals_diff(d_i, d_safe_i))\n\n # Steering features\n phi_b_i = None\n lanes = self.get_followed_lanes()\n for lane_index in lanes:\n lane = self.road.network.get_lane(lane_index)\n longitudinal_pursuit = lane.local_coordinates(self.position)[0] + self.speed * self.TAU_PURSUIT\n lane_psi = lane.heading_at(longitudinal_pursuit)\n _, lateral_i = interval_absolute_to_local(position_i, lane)\n lateral_i = -np.flip(lateral_i)\n i_v_i = 1/np.flip(v_i, 0)\n phi_b_i_lane = np.transpose(np.array([\n [0, 0],\n intervals_product(lateral_i, i_v_i)]))\n # Union of candidate feature intervals\n if phi_b_i is None:\n phi_b_i = phi_b_i_lane\n else:\n phi_b_i[0] = np.minimum(phi_b_i[0], phi_b_i_lane[0])\n phi_b_i[1] = np.maximum(phi_b_i[1], phi_b_i_lane[1])\n\n # Commands interval\n a_i = intervals_product(self.theta_a_i, phi_a_i)\n b_i = intervals_product(self.theta_b_i, phi_b_i)\n\n # Speeds interval\n keep_stability = False\n if keep_stability:\n dv_i = integrator_interval(v_i - self.target_speed, self.theta_a_i[:, 0])\n else:\n dv_i = 
intervals_product(self.theta_a_i[:, 0], self.target_speed - np.flip(v_i, 0))\n dv_i += a_i\n dv_i = np.clip(dv_i, -self.ACC_MAX, self.ACC_MAX)\n keep_stability = True\n if keep_stability:\n delta_psi = list(map(utils.wrap_to_pi, psi_i - lane_psi))\n d_psi_i = integrator_interval(delta_psi, self.theta_b_i[:, 0])\n else:\n d_psi_i = intervals_product(self.theta_b_i[:, 0], lane_psi - np.flip(psi_i, 0))\n d_psi_i += b_i\n\n # Position interval\n cos_i = [-1 if psi_i[0] <= np.pi <= psi_i[1] else min(map(np.cos, psi_i)),\n 1 if psi_i[0] <= 0 <= psi_i[1] else max(map(np.cos, psi_i))]\n sin_i = [-1 if psi_i[0] <= -np.pi/2 <= psi_i[1] else min(map(np.sin, psi_i)),\n 1 if psi_i[0] <= np.pi/2 <= psi_i[1] else max(map(np.sin, psi_i))]\n dx_i = intervals_product(v_i, cos_i)\n dy_i = intervals_product(v_i, sin_i)\n\n # Interval dynamics integration\n self.interval.speed += dv_i * dt\n self.interval.heading += d_psi_i * dt\n self.interval.position[:, 0] += dx_i * dt\n self.interval.position[:, 1] += dy_i * dt\n\n # Add noise\n noise = 0.3\n self.interval.position[:, 0] += noise * dt * np.array([-1, 1])\n self.interval.position[:, 1] += noise * dt * np.array([-1, 1])\n self.interval.heading += noise * dt * np.array([-1, 1])\n\n def predictor_step(self, dt: float) -> None:\n \"\"\"\n Step the interval predictor dynamics\n\n :param dt: timestep [s]\n \"\"\"\n # Create longitudinal and lateral LPVs\n self.predictor_init()\n\n # Detect lane change and update intervals of local coordinates with the new frame\n if self.target_lane_index != self.previous_target_lane_index:\n position_i = self.interval.position\n target_lane = self.road.network.get_lane(self.target_lane_index)\n previous_target_lane = self.road.network.get_lane(self.previous_target_lane_index)\n longi_i, lat_i = interval_absolute_to_local(position_i, target_lane)\n psi_i = self.interval.heading + \\\n target_lane.heading_at(longi_i.mean()) - previous_target_lane.heading_at(longi_i.mean())\n x_i_local_unrotated = 
np.transpose([lat_i, psi_i])\n new_x_i_t = self.lateral_lpv.change_coordinates(x_i_local_unrotated, back=False, interval=True)\n delta = new_x_i_t.mean(axis=0) - self.lateral_lpv.x_i_t.mean(axis=0)\n self.lateral_lpv.x_i_t += delta\n x_i_local_unrotated = self.longitudinal_lpv.change_coordinates(self.longitudinal_lpv.x_i_t,\n back=True,\n interval=True)\n x_i_local_unrotated[:, 0] = longi_i\n new_x_i_t = self.longitudinal_lpv.change_coordinates(x_i_local_unrotated,\n back=False,\n interval=True)\n self.longitudinal_lpv.x_i_t += new_x_i_t.mean(axis=0) - self.longitudinal_lpv.x_i_t.mean(axis=0)\n self.previous_target_lane_index = self.target_lane_index\n\n # Step\n self.longitudinal_lpv.step(dt)\n self.lateral_lpv.step(dt)\n\n # Backward coordinates change\n x_i_long = self.longitudinal_lpv.change_coordinates(self.longitudinal_lpv.x_i_t, back=True, interval=True)\n x_i_lat = self.lateral_lpv.change_coordinates(self.lateral_lpv.x_i_t, back=True, interval=True)\n\n # Conversion from rectified to true coordinates\n target_lane = self.road.network.get_lane(self.target_lane_index)\n position_i = interval_local_to_absolute(x_i_long[:, 0], x_i_lat[:, 0], target_lane)\n self.interval.position = position_i\n self.interval.speed = x_i_long[:, 2]\n self.interval.heading = x_i_lat[:, 1]\n\n def predictor_init(self) -> None:\n \"\"\"Initialize the LPV models used for interval prediction.\"\"\"\n position_i = self.interval.position\n target_lane = self.road.network.get_lane(self.target_lane_index)\n longi_i, lat_i = interval_absolute_to_local(position_i, target_lane)\n v_i = self.interval.speed\n psi_i = self.interval.heading - self.lane.heading_at(longi_i.mean())\n\n # Longitudinal predictor\n if not self.longitudinal_lpv:\n front_interval = self.get_front_interval()\n\n # LPV specification\n if front_interval:\n f_longi_i, _ = interval_absolute_to_local(front_interval.position, target_lane)\n f_pos = f_longi_i[0]\n f_vel = front_interval.speed[0]\n else:\n f_pos, f_vel = 0, 0\n 
x0 = [longi_i[0], f_pos, v_i[0], f_vel]\n center = [-self.DISTANCE_WANTED - self.target_speed * self.TIME_WANTED,\n 0,\n self.target_speed,\n self.target_speed]\n noise = 1\n b = np.eye(4)\n d = np.array([[1], [0], [0], [0]])\n omega_i = np.array([[-1], [1]]) * noise\n u = [[self.target_speed], [self.target_speed], [0], [0]]\n a0, da = self.longitudinal_matrix_polytope()\n self.longitudinal_lpv = LPV(x0, a0, da, b, d, omega_i, u, center=center)\n\n # Lateral predictor\n if not self.lateral_lpv:\n # LPV specification\n x0 = [lat_i[0], psi_i[0]]\n center = [0, 0]\n noise = 0.5\n b = np.identity(2)\n d = np.array([[1], [0]])\n omega_i = np.array([[-1], [1]]) * noise\n u = [[0], [0]]\n a0, da = self.lateral_matrix_polytope()\n self.lateral_lpv = LPV(x0, a0, da, b, d, omega_i, u, center=center)\n\n def longitudinal_matrix_polytope(self) -> Polytope:\n return IntervalVehicle.parameter_box_to_polytope(self.theta_a_i, self.longitudinal_structure)\n\n def lateral_matrix_polytope(self) -> Polytope:\n return IntervalVehicle.parameter_box_to_polytope(self.theta_b_i, self.lateral_structure)\n\n @staticmethod\n def parameter_box_to_polytope(parameter_box: np.ndarray, structure: Callable) -> Polytope:\n a, phi = structure()\n a_theta = lambda params: a + np.tensordot(phi, params, axes=[0, 0])\n return polytope(a_theta, parameter_box)\n\n def get_front_interval(self) -> \"VehicleInterval\":\n # TODO: For now, we assume the front vehicle follows the models' front vehicle\n front_vehicle, _ = self.road.neighbour_vehicles(self)\n if front_vehicle:\n if isinstance(front_vehicle, IntervalVehicle):\n # Use interval from the observer estimate of the front vehicle\n front_interval = front_vehicle.interval\n else:\n # The front vehicle trajectory interval is not being estimated, so it should be considered as certain.\n # We use a new observer created from that current vehicle state, which will have full certainty.\n front_interval = IntervalVehicle.create_from(front_vehicle).interval\n 
else:\n front_interval = None\n return front_interval\n\n def get_followed_lanes(self, lane_change_model: str = \"model\", squeeze: bool = True) -> List[LaneIndex]:\n \"\"\"\n Get the list of lanes that could be followed by this vehicle.\n\n :param lane_change_model: - model: assume that the vehicle will follow the lane of its model behaviour.\n - all: assume that any lane change decision is possible at any timestep\n - right: assume that a right lane change decision is possible at any timestep\n :param squeeze: if True, remove duplicate lanes (at boundaries of the road)\n :return: the list of followed lane indexes\n \"\"\"\n lanes = []\n if lane_change_model == \"model\":\n lanes = [self.target_lane_index]\n elif lane_change_model == \"all\":\n lanes = self.road.network.side_lanes(self.target_lane_index) + [self.target_lane_index]\n elif lane_change_model == \"right\":\n lanes = [self.target_lane_index]\n _from, _to, _id = self.target_lane_index\n if _id < len(self.road.network.graph[_from][_to]) - 1 \\\n and self.road.network.get_lane((_from, _to, _id + 1)).is_reachable_from(self.position):\n lanes += [(_from, _to, _id + 1)]\n elif not squeeze:\n lanes += [self.target_lane_index] # Right lane is also current lane\n return lanes\n\n def partial_observer_step(self, dt: float, alpha: float = 0) -> None:\n \"\"\"\n Step the boundary parts of the current state interval\n\n 1. Split x_i(t) into two upper and lower intervals x_i_-(t) and x_i_+(t)\n 2. Propagate their observer dynamics x_i_-(t+dt) and x_i_+(t+dt)\n 3. Merge the resulting intervals together to x_i(t+dt).\n\n :param dt: timestep [s]\n :param alpha: ratio of the full interval that defines the boundaries\n \"\"\"\n # 1. 
Split x_i(t) into two upper and lower intervals x_i_-(t) and x_i_+(t)\n o = self.interval\n v_minus = IntervalVehicle.create_from(self)\n v_minus.interval = copy.deepcopy(self.interval)\n v_minus.interval.position[1, :] = (1 - alpha) * o.position[0, :] + alpha * o.position[1, :]\n v_minus.interval.speed[1] = (1 - alpha) * o.speed[0] + alpha * o.speed[1]\n v_minus.interval.heading[1] = (1 - alpha) * o.heading[0] + alpha * o.heading[1]\n v_plus = IntervalVehicle.create_from(self)\n v_plus.interval = copy.deepcopy(self.interval)\n v_plus.interval.position[0, :] = alpha * o.position[0, :] + (1 - alpha) * o.position[1, :]\n v_plus.interval.speed[0] = alpha * o.speed[0] + (1 - alpha) * o.speed[1]\n v_plus.interval.heading[0] = alpha * o.heading[0] + (1 - alpha) * o.heading[1]\n # 2. Propagate their observer dynamics x_i_-(t+dt) and x_i_+(t+dt)\n v_minus.road = copy.copy(v_minus.road)\n v_minus.road.vehicles = [v if v is not self else v_minus for v in v_minus.road.vehicles]\n v_plus.road = copy.copy(v_plus.road)\n v_plus.road.vehicles = [v if v is not self else v_plus for v in v_plus.road.vehicles]\n v_minus.observer_step(dt)\n v_plus.observer_step(dt)\n # 3. 
Merge the resulting intervals together to x_i(t+dt).\n self.interval.position = np.array([v_minus.interval.position[0], v_plus.interval.position[1]])\n self.interval.speed = np.array([v_minus.interval.speed[0], v_plus.interval.speed[1]])\n self.interval.heading = np.array([min(v_minus.interval.heading[0], v_plus.interval.heading[0]),\n max(v_minus.interval.heading[1], v_plus.interval.heading[1])])\n\n def store_trajectories(self) -> None:\n \"\"\"Store the current model, min and max states to a trajectory list.\"\"\"\n self.trajectory.append(LinearVehicle.create_from(self))\n self.interval_trajectory.append(copy.deepcopy(self.interval))\n\n def handle_collisions(self, other: 'RoadObject', dt: float) -> None:\n \"\"\"\n Worst-case collision check.\n\n For robust planning, we assume that MDPVehicles collide with the uncertainty set of an IntervalVehicle,\n which corresponds to worst-case outcome.\n\n :param other: the other vehicle\n :param dt: a timestep\n \"\"\"\n if not isinstance(other, MDPVehicle):\n super().handle_collisions(other)\n return\n\n if not self.collidable or self.crashed or other is self:\n return\n\n # Fast rectangular pre-check\n if not utils.point_in_rectangle(other.position,\n self.interval.position[0] - self.LENGTH,\n self.interval.position[1] + self.LENGTH):\n return\n\n # Projection of other vehicle to uncertainty rectangle. 
This is the possible position of this vehicle which is\n # the most likely to collide with other vehicle\n projection = np.minimum(np.maximum(other.position, self.interval.position[0]),\n self.interval.position[1])\n # Accurate rectangular check\n if utils.rotated_rectangles_intersect((projection, self.LENGTH, self.WIDTH, self.heading),\n (other.position, 0.9*other.LENGTH, 0.9*other.WIDTH, other.heading)):\n self.speed = other.speed = min(self.speed, other.speed)\n self.crashed = other.crashed = True\n\n\nclass VehicleInterval(object):\n def __init__(self, vehicle: Vehicle) -> None:\n self.position = np.array([vehicle.position, vehicle.position], dtype=float)\n self.speed = np.array([vehicle.speed, vehicle.speed], dtype=float)\n self.heading = np.array([vehicle.heading, vehicle.heading], dtype=float)\n"
] | [
[
"numpy.maximum",
"numpy.minimum",
"numpy.clip",
"numpy.eye",
"numpy.cos",
"numpy.sin",
"numpy.identity",
"numpy.tensordot",
"numpy.flip",
"numpy.transpose",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yanzhaochang/ELSO | [
"0674cd62e2e87a1ea31da8cab9d0613bae273887"
] | [
"code/DMADE.py"
] | [
"import numpy as np\r\nimport pandas as pd \r\nimport random\r\nimport multiprocessing\r\nimport csv\r\nimport math \r\n\r\nfrom PyQt5.QtWidgets import *\r\n\r\nfrom keras.models import load_model\r\nfrom keras import backend as K\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n\r\n\r\nclass ADELSM(): # 代理辅助模型驱动的差分进化切负荷模型\r\n def __init__(self, size=100, iter_num=200):\r\n self.__size = size\r\n self.__iter_num = iter_num \r\n self.__F = 0.8\r\n self.__CR = 0.4\r\n\r\n self.__process = []\r\n\r\n \r\n def set_load_shedding_location(self, path_name):\r\n data = pd.read_csv(path_name, header=0, engine='python')\r\n loads_shedding = data['负荷'].values.tolist() \r\n for i in range(len(loads_shedding)):\r\n loads_shedding[i] = eval(loads_shedding[i])\r\n\r\n self.max_percent = data['最大切除比例'].values.tolist()\r\n self.loads_shedding = loads_shedding\r\n self.__dim = len(self.loads_shedding) # 个体特征维数\r\n self.x_min = np.zeros(self.__dim)\r\n self.x_max = np.array(self.max_percent) \r\n\r\n def set_blocking_hvdc_location(self, path_name):\r\n data = pd.read_csv(path_name, header=0, engine='python')\r\n data = data['直流'].values.tolist()\r\n for i in range(len(data)):\r\n data[i] = eval(data[i])\r\n self.hvdc_block = data \r\n\r\n # 以下为加载代理辅助模型,目前只有频率模型\r\n def load_frequency_classification_prediction_model(self, file):\r\n K.clear_session()\r\n self.frequency_classification_model = load_model(file) \r\n\r\n # 以下为加载运行场景\r\n def load_scenario_data(self, raw_file, dyr_file):\r\n from stepspy import STEPS \r\n self.simulator = STEPS(is_default=False, log_file='.\\\\simulation\\\\log_op.txt')\r\n self.simulator.load_powerflow_data(raw_file, 'PSS/E')\r\n self.simulator.load_dynamic_data(dyr_file, 'PSS/E')\r\n self.simulator.solve_powerflow('PQ')\r\n self.__loads_p = np.zeros(len(self.loads_shedding)) # 该方式下的负荷量\r\n for i in range(len(self.loads_shedding)):\r\n self.__loads_p[i] = self.simulator.get_load_data(self.loads_shedding[i], 'F', 'PP0_MW') \r\n return\r\n\r\n\r\n # 
以下为设置差分进化算法的参数\r\n def set_evolution_parameter(self, par_name, value):\r\n if par_name == '交叉因子':\r\n self.__CR = value \r\n\r\n elif par_name == '变异因子':\r\n self.__F = value \r\n else:\r\n pass\r\n\r\n #以下为种群初始化\r\n def initialize_population(self):\r\n self.population = np.zeros((self.__size, self.__dim))\r\n for i in range(self.__dim):\r\n for j in range(self.__size): # 初始种群在限度范围内随机初始化\r\n self.population[j, i] = random.uniform(self.x_min[i], self.x_max[i]) \r\n\r\n self.population_fitness = np.zeros(self.__size)\r\n for i in range(self.__size):\r\n self.population_fitness[i] = self.get_individual_fitness(self.population[i, :]) \r\n\r\n # 以下为进化过程\r\n def operate_evolution(self):\r\n '''实施进化计算'''\r\n k = 0\r\n while True:\r\n mutation_population = self.operate_population_mutation()\r\n process_population = self.operate_population_crossover(mutation_population)\r\n self.operate_selection(process_population)\r\n for j in range(self.__size):\r\n self.population_fitness[j] = self.get_individual_fitness(self.population[j, :]) \r\n mean_fitness = np.mean(self.population_fitness)\r\n max_fitness = np.max(self.population_fitness)\r\n min_fitness = np.min(self.population_fitness)\r\n \r\n self.process_output.append('第{}代-平均值{}-最大值{}-最小值{}'.format(k, int(mean_fitness), int(max_fitness), int(min_fitness))) \r\n self.process_output.verticalScrollBar().setValue(self.process_output.verticalScrollBar().maximum())\r\n QApplication.processEvents()\r\n k = k + 1\r\n if k > self.__iter_num:\r\n break \r\n\r\n for i in range(self.__size):\r\n self.population_fitness[i] = self.get_individual_fitness(self.population[i, :]) \r\n return \r\n\r\n def operate_evolution_one_enpouch(self):\r\n mutation_population = self.operate_population_mutation()\r\n process_population = self.operate_population_crossover(mutation_population)\r\n self.operate_selection(process_population)\r\n for j in range(self.__size):\r\n self.population_fitness[j] = self.get_individual_fitness(self.population[j, :]) 
\r\n mean_fitness = np.mean(self.population_fitness)\r\n max_fitness = np.max(self.population_fitness)\r\n min_fitness = np.min(self.population_fitness) \r\n return int(mean_fitness), int(max_fitness), int(min_fitness)\r\n\r\n def operate_population_mutation(self):\r\n '''对种群实施变异'''\r\n mutation_population = np.zeros((self.__size, self.__dim))\r\n for i in range(self.__size):\r\n list_num = list(range(0, self.__size, 1))\r\n list_num.remove(i)\r\n res = random.sample(list_num, 3)\r\n mutation_individual = self.population[res[0], :] + self.__F * (self.population[res[1], :] - self.population[res[2], :]) # 变异操作,产生新个体\r\n \r\n for j in range(self.__dim): # 特征越限处理\r\n if mutation_individual[j] < self.x_min[j] or mutation_individual[j] > self.x_max[j]:\r\n mutation_individual = self.x_min + random.random() * (self.x_max - self.x_min)\r\n break \r\n \r\n mutation_population[i, :] = mutation_individual \r\n return mutation_population\r\n\r\n def operate_population_crossover(self, mutation_population):\r\n '''进行交叉操作'''\r\n process_population = np.zeros((self.__size, self.__dim))\r\n for i in range(self.__size):\r\n randn = random.randint(0, self.__dim)\r\n for j in range(self.__dim):\r\n rand_float = random.random()\r\n if rand_float <= self.__CR or randn == j:\r\n process_population[i, j] = mutation_population[i, j]\r\n else:\r\n process_population[i, j] = self.population[i, j]\r\n return process_population\r\n \r\n\r\n def operate_selection(self, process_population):\r\n '''对个体进行选择和更新'''\r\n for i in range(self.__size):\r\n ind_1 = self.population[i, :]\r\n ind_2 = process_population[i, :]\r\n better_ind = self.select_individual_cla(ind_1, ind_2)\r\n self.population[i, :] = better_ind\r\n return\r\n\r\n def select_individual_cla(self, ind_1, ind_2):\r\n ind_1_cla = self.frequency_classification_model.predict_classes(5 * ind_1.reshape((1, -1)))\r\n ind_1_cla = ind_1_cla[0, 0] \r\n ind_2_cla = self.frequency_classification_model.predict_classes(5 * ind_2.reshape((1, 
-1)))\r\n ind_2_cla = ind_2_cla[0, 0] \r\n \r\n if ind_1_cla == 1 and ind_2_cla == 1:\r\n fit_1 = self.get_individual_fitness(ind_1)\r\n fit_2 = self.get_individual_fitness(ind_2)\r\n if fit_1 > fit_2:\r\n better_ind = ind_2\r\n else:\r\n better_ind = ind_1\r\n elif ind_1_cla == 0 and ind_2_cla == 1: \r\n better_ind = ind_2\r\n\r\n elif ind_1_cla == 0 and ind_2_cla == 0:\r\n better_ind = np.zeros(self.__dim)\r\n for i in range(self.__dim):\r\n better_ind[i] = random.uniform(self.x_min[i], self.x_max[i])\r\n else:\r\n better_ind = ind_1\r\n return better_ind \r\n\r\n # 以下为个体适应度计算函数\r\n def get_individual_fitness(self, individual): # individual应该是个一维数组\r\n value = np.sum(individual * self.__loads_p) # 切负荷总量\r\n return value \r\n\r\n def save_best_individual(self, file):\r\n min_index = np.argmin(self.population_fitness)\r\n best_individual = self.population[min_index, :]\r\n with open(file, 'w', newline='') as f: \r\n csv_write = csv.writer(f)\r\n csv_write.writerow(self.loads_shedding)\r\n csv_write.writerow(best_individual)\r\n\r\n loads_name = []\r\n for load in self.loads_shedding:\r\n NAME = self.simulator.get_bus_data(load[0], 'S', 'NAME')\r\n loads_name.append(NAME)\r\n return best_individual, self.__loads_p, self.loads_shedding, loads_name\r\n\r\n def check_evolution_result(self, best_individual):\r\n buses = self.simulator.get_all_buses() \r\n for bus in buses:\r\n AREA = self.simulator.get_bus_data(bus, 'I', 'AREA')\r\n if AREA == 37: \r\n self.simulator.prepare_bus_meter(bus, 'FREQUENCY IN HZ') \r\n self.simulator.set_dynamic_simulation_time_step(0.002)\r\n self.simulator.set_dynamic_simulator_output_file('.\\\\simulation\\\\代理辅助模型校验结果')\r\n self.simulator.start_dynamic_simulation()\r\n self.simulator.run_dynamic_simulation_to_time(0.5)\r\n \r\n for hvdc in self.hvdc_block:\r\n self.simulator.manually_block_hvdc(hvdc) \r\n self.simulator.trip_fixed_shunt((hvdc[1], '1')) \r\n\r\n self.simulator.run_dynamic_simulation_to_time(0.6)\r\n for i in 
range(len(self.loads_shedding)):\r\n self.simulator.scale_load(self.loads_shedding[i], -1*best_individual[i])\r\n \r\n self.simulator.run_dynamic_simulation_to_time(5.0) \r\n self.simulator.stop_dynamic_simulation() \r\n\r\n sample_data = pd.read_csv('.\\\\simulation\\\\代理辅助模型校验结果.csv', header=0, engine='python')\r\n columns = list(sample_data)\r\n frequency_column = []\r\n for column in columns:\r\n if 'FREQUENCY' in column:\r\n frequency_column.append(column)\r\n else:\r\n pass\r\n frequency_data = sample_data.loc[:, frequency_column]\r\n min_frequency = np.min(frequency_data.values)\r\n return min_frequency\r\n \r\n\r\n\r\n \r\nclass ADELSMTL(): # 代理辅助模型驱动的差分进化切负荷模型\r\n def __init__(self, size=50, iter_num=200, F=0.8, CR=0.4):\r\n self.__size = size\r\n self.__iter_num = iter_num \r\n self.__F = F\r\n self.__CR = CR\r\n\r\n # 以下为加载代理辅助模型,目前只有频率模型\r\n def load_frequency_classification_prediction_model(self, file):\r\n K.clear_session()\r\n self.frequency_classification_model = load_model(file)\r\n\r\n # 以下为加载运行场景\r\n def load_scenario_data(self, raw_file, dyr_file):\r\n from stepspy import STEPS \r\n self.simulator = STEPS(is_default=False, log_file='.\\simulation\\log.txt')\r\n self.simulator.load_powerflow_data(raw_file, 'PSS/E')\r\n self.simulator.load_dynamic_data(dyr_file, 'PSS/E')\r\n self.simulator.solve_powerflow('PQ')\r\n\r\n self.__dim = len(self.loads_shedding) # 个体特征维数\r\n \r\n self.__loads_p = np.zeros(len(self.loads_shedding)) # 该方式下的负荷量\r\n for i in range(len(self.loads_shedding)):\r\n self.__loads_p[i] = self.simulator.get_load_data(self.loads_shedding[i], 'F', 'PP0_MW')\r\n\r\n\r\n def set_load_shedding_location(self, path_name):\r\n data = pd.read_csv(path_name, header=0, engine='python')\r\n loads_shedding = data['负荷'].values.tolist() \r\n for i in range(len(loads_shedding)):\r\n loads_shedding[i] = eval(loads_shedding[i])\r\n\r\n self.max_percent = data['最大切除比例'].values.tolist()\r\n self.loads_shedding = loads_shedding\r\n self.__dim = 
len(self.loads_shedding) # 个体特征维数\r\n self.x_min = np.zeros(self.__dim)\r\n self.x_max = np.array(self.max_percent) \r\n\r\n #以下为种群初始化\r\n def initialize_population(self):\r\n self.population = np.zeros((self.__size, self.__dim))\r\n for i in range(self.__dim):\r\n for j in range(self.__size): # 初始种群在限度范围内随机初始化\r\n self.population[j, i] = random.uniform(self.x_min[i], self.x_max[i])\r\n\r\n self.population_fitness = np.zeros(self.__size)\r\n for i in range(self.__size):\r\n self.population_fitness[i] = self.get_individual_fitness(self.population[i, :])\r\n\r\n def correct_F(self, current):\r\n lanba = math.exp(1 - self.__iter_num / (self.__iter_num + 1 - current))\r\n F = 1.0 * 2 ** lanba\r\n return F\r\n\r\n def correct_CR(self):\r\n CR = 0.5 * (1 + random.random()) \r\n return CR \r\n\r\n # 以下为进化过程\r\n def operate_evolution(self):\r\n '''实施进化计算'''\r\n k = 0\r\n while True:\r\n F = self.correct_F(k)\r\n mutation_population = self.operate_population_mutation(F)\r\n \r\n process_population = self.operate_population_crossover(mutation_population)\r\n \r\n self.operate_selection(process_population)\r\n\r\n for j in range(self.__size):\r\n self.population_fitness[j] = self.get_individual_fitness(self.population[j, :]) \r\n mean_fitness = np.mean(self.population_fitness)\r\n max_fitness = np.max(self.population_fitness)\r\n min_fitness = np.min(self.population_fitness)\r\n \r\n min_fit_index = np.argmin(self.population_fitness)\r\n best_ind = self.population[min_fit_index, :]\r\n\r\n k = k + 1\r\n if k > self.__iter_num:\r\n break \r\n\r\n for i in range(self.__size):\r\n self.population_fitness[i] = self.get_individual_fitness(self.population[i, :]) \r\n\r\n min_index = np.argmin(self.population_fitness)\r\n self.best_individual = self.population[min_index, :] \r\n return \r\n\r\n def operate_population_mutation(self, F):\r\n '''对种群实施变异'''\r\n self.__F = F \r\n mutation_population = np.zeros((self.__size, self.__dim))\r\n for i in range(self.__size):\r\n list_num = 
list(range(0, self.__size, 1))\r\n list_num.remove(i)\r\n res = random.sample(list_num, 3)\r\n mutation_individual = self.population[res[0], :] + self.__F * (self.population[res[1], :] - self.population[res[2], :]) # 变异操作,产生新个体\r\n \r\n for j in range(self.__dim): # 特征越限处理\r\n if mutation_individual[j] < self.x_min[j] or mutation_individual[j] > self.x_max[j]:\r\n mutation_individual = self.x_min + random.random() * (self.x_max - self.x_min)\r\n break \r\n \r\n mutation_population[i, :] = mutation_individual \r\n return mutation_population\r\n\r\n def operate_population_crossover(self, mutation_population):\r\n '''进行交叉操作'''\r\n self.__CR = self.correct_CR()\r\n process_population = np.zeros((self.__size, self.__dim))\r\n for i in range(self.__size):\r\n randn = random.randint(0, self.__dim)\r\n for j in range(self.__dim):\r\n rand_float = random.random()\r\n if rand_float <= self.__CR or randn == j:\r\n process_population[i, j] = mutation_population[i, j]\r\n else:\r\n process_population[i, j] = self.population[i, j]\r\n return process_population\r\n \r\n\r\n def operate_selection(self, process_population):\r\n '''对个体进行选择和更新'''\r\n for i in range(self.__size):\r\n ind_1 = self.population[i, :]\r\n ind_2 = process_population[i, :]\r\n better_ind = self.select_individual_cla(ind_1, ind_2)\r\n self.population[i, :] = better_ind\r\n return\r\n\r\n def select_individual_cla(self, ind_1, ind_2):\r\n ind_1_cla = self.frequency_classification_model.predict_classes(5 * ind_1.reshape((1, -1)))\r\n ind_1_cla = ind_1_cla[0, 0] \r\n ind_2_cla = self.frequency_classification_model.predict_classes(5 * ind_2.reshape((1, -1)))\r\n ind_2_cla = ind_2_cla[0, 0] \r\n \r\n if ind_1_cla == 1 and ind_2_cla == 1:\r\n fit_1 = self.get_individual_fitness(ind_1)\r\n fit_2 = self.get_individual_fitness(ind_2)\r\n if fit_1 > fit_2:\r\n better_ind = ind_2\r\n else:\r\n better_ind = ind_1\r\n elif ind_1_cla == 0 and ind_2_cla == 1: \r\n better_ind = ind_2\r\n\r\n elif ind_1_cla == 0 and ind_2_cla 
== 0: \r\n better_ind = np.zeros(self.__dim)\r\n for i in range(self.__dim):\r\n better_ind[i] = random.uniform(self.x_min[i], self.x_max[i])\r\n else:\r\n better_ind = ind_1\r\n return better_ind \r\n\r\n \r\n # 以下为个体适应度计算函数\r\n def get_individual_fitness(self, individual): # individual应该是个一维数组\r\n value = np.sum(individual * self.__loads_p) # 切负荷总量\r\n return value \r\n \r\n\r\n def get_best_individual(self):\r\n min_shedding_power = self.get_individual_fitness(self.best_individual)\r\n return self.best_individual, min_shedding_power"
] | [
[
"pandas.read_csv",
"numpy.min",
"numpy.max",
"numpy.mean",
"numpy.argmin",
"numpy.array",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
senden9/lsh-rs | [
"d21f0c450338f04269d6ac49ca6cfccbf71a531d"
] | [
"lsh-py/test/test_.py"
] | [
"from floky import L2, SRP, QueryResult\nimport numpy as np\nfrom scipy.spatial.distance import cdist\nfrom typing import List\n\n\ndef get_mean_collisions(results: List[QueryResult]):\n return np.mean(list(map(lambda qr: qr.n_collisions, results)))\n\n\ndef test_l2():\n # first check we don't get any error if we don't have results\n np.random.seed(1)\n n = 1\n dim = 10\n arr = np.random.randn(n, dim)\n lsh = L2(n_projections=10, n_hash_tables=1, log=False, seed=1)\n lsh.fit(arr)\n assert lsh.predict(np.random.randn(1, dim))[0] == QueryResult([], [], 0, [])\n\n N = 10000\n n = 100\n\n arr = np.random.randn(N, dim)\n dist = cdist(arr[:n], arr, metric=\"euclidean\")\n # get top 4 non trivial results\n top_k = dist.argsort(1)[:, 1:5]\n top_k_dist = dist[np.arange(n)[:, None], top_k]\n # define the distance R to the mean of top_k distances\n R = top_k_dist.mean()\n\n # use that to rescale the data\n arr /= R\n\n lsh = L2(n_projections=10, n_hash_tables=1, log=False, seed=1, r=4.0)\n lsh.fit(arr)\n\n query = np.random.randn(n, dim) / R\n results = lsh.predict(query, only_index=True, top_k=5)\n assert get_mean_collisions(results) == 36.7\n\n\ndef test_srp():\n np.random.seed(1)\n N = 10000\n n = 100\n dim = 10\n\n arr = np.random.randn(N, dim)\n lsh = SRP(n_projections=19, n_hash_tables=10, log=False, seed=1)\n lsh.fit(arr)\n query = np.random.randn(n, dim)\n results = lsh.predict(query)\n assert get_mean_collisions(results) == 36.21\n"
] | [
[
"numpy.arange",
"numpy.random.randn",
"scipy.spatial.distance.cdist",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
INK-USC/DIG | [
"7d732c55b9f72521f6d58f1b06e7756985bfd5f0"
] | [
"main.py"
] | [
"import sys, numpy as np, argparse, random\nsys.path.append('../')\n\nfrom tqdm import tqdm\n\nimport torch\nfrom datasets import load_dataset\nfrom dig import DiscretetizedIntegratedGradients\nfrom attributions import run_dig_explanation\nfrom metrics import eval_log_odds, eval_comprehensiveness, eval_sufficiency\nimport monotonic_paths\n\nall_outputs = []\n\n\ndef calculate_attributions(inputs, device, args, attr_func, base_token_emb, nn_forward_func, get_tokens):\n\t# computes the attributions for given input\n\n\t# move inputs to main device\n\tinp = [x.to(device) if x is not None else None for x in inputs]\n\n\t# compute attribution\n\tscaled_features, input_ids, ref_input_ids, input_embed, ref_input_embed, position_embed, ref_position_embed, type_embed, ref_type_embed, attention_mask = inp\n\tattr = run_dig_explanation(attr_func, scaled_features, position_embed, type_embed, attention_mask, (2**args.factor)*(args.steps+1)+1)\n\n\t# compute metrics\n\tlog_odd, pred\t= eval_log_odds(nn_forward_func, input_embed, position_embed, type_embed, attention_mask, base_token_emb, attr, topk=args.topk)\n\tcomp\t\t\t= eval_comprehensiveness(nn_forward_func, input_embed, position_embed, type_embed, attention_mask, base_token_emb, attr, topk=args.topk)\n\tsuff\t\t\t= eval_sufficiency(nn_forward_func, input_embed, position_embed, type_embed, attention_mask, base_token_emb, attr, topk=args.topk)\n\n\treturn log_odd, comp, suff\n\n\ndef main(args):\n\n\t# set seed\n\trandom.seed(args.seed)\n\tnp.random.seed(args.seed)\n\ttorch.manual_seed(args.seed)\n\n\t# neural network specific imports\n\tif args.nn == 'distilbert':\n\t\tfrom distilbert_helper import nn_forward_func, nn_init, get_inputs, get_base_token_emb, get_word_embeddings, get_tokens, load_mappings\n\telif args.nn == 'roberta':\n\t\tfrom roberta_helper import nn_forward_func, nn_init, get_inputs, get_base_token_emb, get_word_embeddings, get_tokens, load_mappings\n\telif args.nn == 'bert':\n\t\tfrom bert_helper import 
nn_forward_func, nn_init, get_inputs, get_base_token_emb, get_word_embeddings, get_tokens, load_mappings\n\telse:\n\t\traise NotImplementedError\n\n\tauxiliary_data = load_mappings(args.dataset, knn_nbrs=args.knn_nbrs)\n\n\t# Fix the gpu to use\n\tdevice\t\t= torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\n\t# init model and tokenizer in cpu first\n\tnn_init(device, args.dataset)\n\n\t# Define the Attribution function\n\tattr_func = DiscretetizedIntegratedGradients(nn_forward_func)\n\n\t# load the dataset\n\tif args.dataset == 'imdb':\n\t\tdataset\t= load_dataset('imdb')['test']\n\t\tdata\t= list(zip(dataset['text'], dataset['label']))\n\t\tdata\t= random.sample(data, 2000)\n\telif args.dataset == 'sst2':\n\t\tdataset\t= load_dataset('glue', 'sst2')['test']\n\t\tdata\t= list(zip(dataset['sentence'], dataset['label'], dataset['idx']))\n\telif args.dataset == 'rotten':\n\t\tdataset\t= load_dataset('rotten_tomatoes')['test']\n\t\tdata\t= list(zip(dataset['text'], dataset['label']))\n\telse:\n\t\traise NotImplementedError\n\n\t# get ref token embedding\n\tbase_token_emb = get_base_token_emb(device)\n\n\t# compute the DIG attributions for all the inputs\n\tprint('Starting attribution computation...')\n\tinputs = []\n\tlog_odds, comps, suffs, count = 0, 0, 0, 0\n\tprint_step = 2\n\tfor row in tqdm(data):\n\t\tinp = get_inputs(row[0], device)\n\t\tinput_ids, ref_input_ids, input_embed, ref_input_embed, position_embed, ref_position_embed, type_embed, ref_type_embed, attention_mask = inp\n\t\tscaled_features \t\t= monotonic_paths.scale_inputs(input_ids.squeeze().tolist(), ref_input_ids.squeeze().tolist(),\\\n\t\t\t\t\t\t\t\t\t\t\tdevice, auxiliary_data, steps=args.steps, factor=args.factor, strategy=args.strategy)\n\t\tinputs\t\t\t\t\t= [scaled_features, input_ids, ref_input_ids, input_embed, ref_input_embed, position_embed, ref_position_embed, type_embed, ref_type_embed, attention_mask]\n\t\tlog_odd, comp, suff\t\t= calculate_attributions(inputs, 
device, args, attr_func, base_token_emb, nn_forward_func, get_tokens)\n\t\tlog_odds\t+= log_odd\n\t\tcomps\t\t+= comp\n\t\tsuffs \t\t+= suff\n\t\tcount\t\t+= 1\n\n\t\t# print the metrics\n\t\tif count % print_step == 0:\n\t\t\tprint('Log-odds: ', np.round(log_odds / count, 4), 'Comprehensiveness: ', np.round(comps / count, 4), 'Sufficiency: ', np.round(suffs / count, 4))\n\n\tprint('Log-odds: ', np.round(log_odds / count, 4), 'Comprehensiveness: ', np.round(comps / count, 4), 'Sufficiency: ', np.round(suffs / count, 4))\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser(description='IG Path')\n\tparser.add_argument('-dataset', \tdefault='sst2', \t\tchoices=['sst2', 'imdb', 'ag', 'rotten', 'sst2_epoch'])\n\tparser.add_argument('-nn', \t\t\tdefault='distilbert', \tchoices=['distilbert', 'roberta', 'lstm', 'bert', 'albert'])\n\tparser.add_argument('-strategy', \tdefault='greedy', \t\tchoices=['greedy', 'maxcount'], help='The algorithm to find the next anchor point')\n\tparser.add_argument('-steps', \t\tdefault=30, type=int)\t# m\n\tparser.add_argument('-topk', \t\tdefault=20, type=int)\t# k\n\tparser.add_argument('-factor', \t\tdefault=0, \ttype=int)\t# f\n\tparser.add_argument('-knn_nbrs',\tdefault=500, type=int)\t# KNN\n\tparser.add_argument('-seed', \t\tdefault=42, type=int)\n\n\targs = parser.parse_args()\n\n\tmain(args)\n"
] | [
[
"numpy.round",
"torch.manual_seed",
"torch.cuda.is_available",
"numpy.random.seed"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
UNSKILL3-D/projectRobot | [
"bb584268f34ccae0da289ab74abc578af4b07c6d"
] | [
"robots/gen_save_maker.py"
] | [
"def compile_generator(filename):\n import numpy as np\n inp = open('generator.py', 'r')\n gen_code = ''\n buf = '+'\n while buf != 'def generate():\\n':\n buf = inp.readline()\n while buf:\n gen_code += buf\n buf = inp.readline()\n save = np.array(['dynamic', gen_code, 'default'], dtype=object)\n np.save(filename, save)\n"
] | [
[
"numpy.array",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
isabelyuyingwu/parcels | [
"5bddee6d8de5c1ea14ee5e1cf3602b89bcc34952"
] | [
"tests/test_kernel_language.py"
] | [
"from parcels import FieldSet, ParticleSet, ScipyParticle, JITParticle, Kernel, Variable\nfrom parcels.kernels.seawaterdensity import polyTEOS10_bsq, UNESCO_Density\nfrom parcels import random as parcels_random\nimport numpy as np\nimport pytest\nimport random as py_random\nfrom os import path\nimport sys\n\n\nptype = {'scipy': ScipyParticle, 'jit': JITParticle}\n\n\ndef expr_kernel(name, pset, expr):\n pycode = \"\"\"def %s(particle, fieldset, time):\n particle.p = %s\"\"\" % (name, expr)\n return Kernel(pset.fieldset, pset.ptype, pyfunc=None,\n funccode=pycode, funcname=name,\n funcvars=['particle'])\n\n\[email protected]\ndef fieldset(xdim=20, ydim=20):\n \"\"\" Standard unit mesh fieldset \"\"\"\n lon = np.linspace(0., 1., xdim, dtype=np.float32)\n lat = np.linspace(0., 1., ydim, dtype=np.float32)\n U, V = np.meshgrid(lat, lon)\n data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32)}\n dimensions = {'lat': lat, 'lon': lon}\n return FieldSet.from_data(data, dimensions, mesh='flat', transpose=True)\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('name, expr, result', [\n ('Add', '2 + 5', 7),\n ('Sub', '6 - 2', 4),\n ('Mul', '3 * 5', 15),\n ('Div', '24 / 4', 6),\n])\ndef test_expression_int(fieldset, mode, name, expr, result, npart=10):\n \"\"\" Test basic arithmetic expressions \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)\n assert(np.array([result == particle.p for particle in pset]).all())\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('name, expr, result', [\n ('Add', '2. + 5.', 7),\n ('Sub', '6. - 2.', 4),\n ('Mul', '3. * 5.', 15),\n ('Div', '24. 
/ 4.', 6),\n ('Pow', '2 ** 3', 8),\n])\ndef test_expression_float(fieldset, mode, name, expr, result, npart=10):\n \"\"\" Test basic arithmetic expressions \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)\n assert(np.array([result == particle.p for particle in pset]).all())\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('name, expr, result', [\n ('True', 'True', True),\n ('False', 'False', False),\n ('And', 'True and False', False),\n ('Or', 'True or False', True),\n ('Equal', '5 == 5', True),\n ('Lesser', '5 < 3', False),\n ('LesserEq', '3 <= 5', True),\n ('Greater', '4 > 2', True),\n ('GreaterEq', '2 >= 4', False),\n])\ndef test_expression_bool(fieldset, mode, name, expr, result, npart=10):\n \"\"\" Test basic arithmetic expressions \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n pset.execute(expr_kernel('Test%s' % name, pset, expr), endtime=1., dt=1.)\n if mode == 'jit':\n assert(np.array([result == (particle.p == 1) for particle in pset]).all())\n else:\n assert(np.array([result == particle.p for particle in pset]).all())\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_while_if_break(fieldset, mode):\n \"\"\"Test while, if and break commands\"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32, initial=0.)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])\n\n def kernel(particle, fieldset, time):\n while particle.p < 30:\n if particle.p > 9:\n break\n particle.p += 1\n if particle.p > 5:\n particle.p *= 2.\n pset.execute(kernel, endtime=1., dt=1.)\n assert np.allclose(np.array([p.p for p in pset]), 20., 
rtol=1e-12)\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_nested_if(fieldset, mode):\n \"\"\"Test nested if commands\"\"\"\n class TestParticle(ptype[mode]):\n p0 = Variable('p0', dtype=np.int32, initial=0)\n p1 = Variable('p1', dtype=np.int32, initial=1)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=0, lat=0)\n\n def kernel(particle, fieldset, time):\n if particle.p1 >= particle.p0:\n var = particle.p0\n if var + 1 < particle.p1:\n particle.p1 = -1\n\n pset.execute(kernel, endtime=10, dt=1.)\n assert np.allclose([pset[0].p0, pset[0].p1], [0, 1])\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_dt_as_variable_in_kernel(fieldset, mode):\n pset = ParticleSet(fieldset, pclass=ptype[mode], lon=0, lat=0)\n\n def kernel(particle, fieldset, time):\n dt = 1. # noqa\n\n pset.execute(kernel, endtime=10, dt=1.)\n\n\ndef test_parcels_tmpvar_in_kernel(fieldset):\n \"\"\"Tests for error thrown if vartiable with 'tmp' defined in custom kernel\"\"\"\n error_thrown = False\n pset = ParticleSet(fieldset, pclass=JITParticle, lon=0, lat=0)\n\n def kernel_tmpvar(particle, fieldset, time):\n parcels_tmpvar0 = 0 # noqa\n\n try:\n pset.execute(kernel_tmpvar, endtime=1, dt=1.)\n except NotImplementedError:\n error_thrown = True\n assert error_thrown\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_if_withfield(fieldset, mode):\n \"\"\"Test combination of if and Field sampling commands\"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32, initial=0.)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0], lat=[0])\n\n def kernel(particle, fieldset, time):\n u = fieldset.U[time, 0, 0, 1.]\n particle.p = 0\n if fieldset.U[time, 0, 0, 1.] == u:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:\n particle.p += 1\n if True:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == u and 1 == 1:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.] 
and fieldset.U[time, 0, 0, 1.] == fieldset.U[time, 0, 0, 1.]:\n particle.p += 1\n if fieldset.U[time, 0, 0, 1.] == u:\n particle.p += 1\n else:\n particle.p += 1000\n if fieldset.U[time, 0, 0, 1.] == 3:\n particle.p += 1000\n else:\n particle.p += 1\n\n pset.execute(kernel, endtime=1., dt=1.)\n assert np.allclose(np.array([p.p for p in pset]), 7., rtol=1e-12)\n\n\[email protected](\n 'mode',\n ['scipy',\n pytest.param('jit',\n marks=pytest.mark.xfail(\n (sys.version_info >= (3, 0)) or (sys.platform == 'win32'),\n reason=\"py.test FD capturing does not work for jit on python3 or Win\"))\n ])\ndef test_print(fieldset, mode, capfd):\n \"\"\"Test print statements\"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32, initial=0.)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0.5])\n\n def kernel(particle, fieldset, time):\n particle.p = fieldset.U[time, particle.depth, particle.lat, particle.lon]\n tmp = 5\n print(\"%d %f %f\" % (particle.id, particle.p, tmp))\n pset.execute(kernel, endtime=1., dt=1.)\n out, err = capfd.readouterr()\n lst = out.split(' ')\n tol = 1e-8\n assert abs(float(lst[0]) - pset[0].id) < tol and abs(float(lst[1]) - pset[0].p) < tol and abs(float(lst[2]) - 5) < tol\n\n def kernel2(particle, fieldset, time):\n tmp = 3\n print(\"%f\" % (tmp))\n pset.execute(kernel2, endtime=1., dt=1.)\n out, err = capfd.readouterr()\n lst = out.split(' ')\n assert abs(float(lst[0]) - 3) < tol\n\n\ndef random_series(npart, rngfunc, rngargs, mode):\n random = parcels_random if mode == 'jit' else py_random\n random.seed(1234)\n func = getattr(random, rngfunc)\n series = [func(*rngargs) for _ in range(npart)]\n random.seed(1234) # Reset the RNG seed\n return series\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('rngfunc, rngargs', [\n ('random', []),\n ('uniform', [0., 20.]),\n ('randint', [0, 20]),\n])\ndef test_random_float(fieldset, mode, rngfunc, rngargs, npart=10):\n \"\"\" Test basic random number 
generation \"\"\"\n class TestParticle(ptype[mode]):\n p = Variable('p', dtype=np.float32 if rngfunc == 'randint' else np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle,\n lon=np.linspace(0., 1., npart),\n lat=np.zeros(npart) + 0.5)\n series = random_series(npart, rngfunc, rngargs, mode)\n kernel = expr_kernel('TestRandom_%s' % rngfunc, pset,\n 'random.%s(%s)' % (rngfunc, ', '.join([str(a) for a in rngargs])))\n pset.execute(kernel, endtime=1., dt=1.)\n assert np.allclose(np.array([p.p for p in pset]), series, atol=1e-9)\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('c_inc', ['str', 'file'])\ndef test_c_kernel(fieldset, mode, c_inc):\n coord_type = np.float32 if c_inc == 'str' else np.float64\n pset = ParticleSet(fieldset, pclass=ptype[mode], lon=[0.5], lat=[0],\n lonlatdepth_dtype=coord_type)\n\n def func(U, lon, dt):\n u = U.data[0, 2, 1]\n return lon + u * dt\n\n if c_inc == 'str':\n c_include = \"\"\"\n static inline ErrorCode func(CField *f, float *lon, float *dt)\n {\n float data2D[2][2][2];\n ErrorCode err = getCell2D(f, 1, 2, 0, data2D, 1); CHECKERROR(err);\n float u = data2D[0][0][0];\n *lon += u * *dt;\n return SUCCESS;\n }\n \"\"\"\n else:\n c_include = path.join(path.dirname(__file__), 'customed_header.h')\n\n def ckernel(particle, fieldset, time):\n func('parcels_customed_Cfunc_pointer_args', fieldset.U, particle.lon, particle.dt)\n\n def pykernel(particle, fieldset, time):\n particle.lon = func(fieldset.U, particle.lon, particle.dt)\n\n if mode == 'scipy':\n kernel = pset.Kernel(pykernel)\n else:\n kernel = pset.Kernel(ckernel, c_include=c_include)\n pset.execute(kernel, endtime=3., dt=3.)\n assert np.allclose(pset[0].lon, 0.81578948)\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_dt_modif_by_kernel(fieldset, mode):\n class TestParticle(ptype[mode]):\n age = Variable('age', dtype=np.float32)\n pset = ParticleSet(fieldset, pclass=TestParticle, lon=[0.5], lat=[0])\n\n def modif_dt(particle, fieldset, time):\n 
particle.age += particle.dt\n particle.dt = 2\n\n endtime = 4\n pset.execute(modif_dt, endtime=endtime, dt=1.)\n assert np.isclose(pset[0].age, endtime)\n\n\[email protected]('mode', ['scipy', 'jit'])\ndef test_seawaterdensity_kernels(mode):\n\n def generate_fieldset(xdim=2, ydim=2, zdim=2, tdim=1):\n lon = np.linspace(0., 10., xdim, dtype=np.float32)\n lat = np.linspace(0., 10., ydim, dtype=np.float32)\n depth = np.linspace(0, 2000, zdim, dtype=np.float32)\n time = np.zeros(tdim, dtype=np.float64)\n U = np.ones((tdim, zdim, ydim, xdim))\n V = np.ones((tdim, zdim, ydim, xdim))\n abs_salinity = 30 * np.ones((tdim, zdim, ydim, xdim))\n cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim))\n dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time}\n data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32),\n 'abs_salinity': np.array(abs_salinity, dtype=np.float32),\n 'cons_temperature': np.array(cons_temperature, dtype=np.float32)}\n return (data, dimensions)\n\n data, dimensions = generate_fieldset()\n fieldset = FieldSet.from_data(data, dimensions)\n\n class DensParticle(ptype[mode]):\n density = Variable('density', dtype=np.float32)\n\n pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000)\n\n pset.execute(polyTEOS10_bsq, runtime=0, dt=0)\n assert np.allclose(pset[0].density, 1022.85377)\n\n\[email protected]('mode', ['scipy', 'jit'])\[email protected]('pressure', [0, 10])\ndef test_UNESCOdensity_kernel(mode, pressure):\n\n def generate_fieldset(p, xdim=2, ydim=2, zdim=2, tdim=1):\n lon = np.linspace(0., 10., xdim, dtype=np.float32)\n lat = np.linspace(0., 10., ydim, dtype=np.float32)\n depth = np.linspace(0, 2000, zdim, dtype=np.float32)\n time = np.zeros(tdim, dtype=np.float64)\n U = np.ones((tdim, zdim, ydim, xdim))\n V = np.ones((tdim, zdim, ydim, xdim))\n psu_salinity = 8 * np.ones((tdim, zdim, ydim, xdim))\n cons_temperature = 10 * np.ones((tdim, zdim, ydim, xdim))\n cons_pressure = p * np.ones((tdim, 
zdim, ydim, xdim))\n dimensions = {'lat': lat, 'lon': lon, 'depth': depth, 'time': time}\n data = {'U': np.array(U, dtype=np.float32), 'V': np.array(V, dtype=np.float32),\n 'psu_salinity': np.array(psu_salinity, dtype=np.float32),\n 'cons_pressure': np.array(cons_pressure, dtype=np.float32),\n 'cons_temperature': np.array(cons_temperature, dtype=np.float32)}\n return (data, dimensions)\n\n data, dimensions = generate_fieldset(pressure)\n fieldset = FieldSet.from_data(data, dimensions)\n\n class DensParticle(ptype[mode]):\n density = Variable('density', dtype=np.float32)\n\n pset = ParticleSet(fieldset, pclass=DensParticle, lon=5, lat=5, depth=1000)\n\n pset.execute(UNESCO_Density, runtime=0, dt=0)\n\n if(pressure == 0):\n assert np.allclose(pset[0].density, 1005.9465)\n elif(pressure == 10):\n assert np.allclose(pset[0].density, 1006.4179)\n"
] | [
[
"numpy.allclose",
"numpy.meshgrid",
"numpy.linspace",
"numpy.ones",
"numpy.array",
"numpy.zeros",
"numpy.isclose"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sinclairnick/EfficientDet | [
"f26fc90dab8e17479de141c75b4fb1a39904aea2"
] | [
"generators/common.py"
] | [
"import numpy as np\nimport random\nimport warnings\nimport cv2\nfrom tensorflow import keras\n\nfrom utils.anchors import anchors_for_shape, anchor_targets_bbox, AnchorParameters\n\n\nclass Generator(keras.utils.Sequence):\n \"\"\"\n Abstract generator class.\n \"\"\"\n\n def __init__(\n self,\n phi=0,\n image_sizes=(512, 640, 768, 896, 1024, 1280, 1408),\n misc_effect=None,\n visual_effect=None,\n batch_size=1,\n group_method='random', # one of 'none', 'random', 'ratio'\n shuffle_groups=True,\n detect_text=False,\n detect_quadrangle=False,\n ):\n \"\"\"\n Initialize Generator object.\n\n Args:\n batch_size: The size of the batches to generate.\n group_method: Determines how images are grouped together (defaults to 'ratio', one of ('none', 'random', 'ratio')).\n shuffle_groups: If True, shuffles the groups each epoch.\n image_sizes:\n \"\"\"\n self.misc_effect = misc_effect\n self.visual_effect = visual_effect\n self.batch_size = int(batch_size)\n self.group_method = group_method\n self.shuffle_groups = shuffle_groups\n self.detect_text = detect_text\n self.detect_quadrangle = detect_quadrangle\n self.image_size = image_sizes[phi]\n self.groups = None\n self.anchor_parameters = AnchorParameters.default if not self.detect_text else AnchorParameters(\n ratios=(0.25, 0.5, 1., 2.),\n sizes=(16, 32, 64, 128, 256))\n self.anchors = anchors_for_shape((self.image_size, self.image_size), anchor_params=self.anchor_parameters)\n self.num_anchors = self.anchor_parameters.num_anchors()\n\n # Define groups\n self.group_images()\n\n # Shuffle when initializing\n if self.shuffle_groups:\n random.shuffle(self.groups)\n\n def on_epoch_end(self):\n if self.shuffle_groups:\n random.shuffle(self.groups)\n\n def size(self):\n \"\"\"\n Size of the dataset.\n \"\"\"\n raise NotImplementedError('size method not implemented')\n\n def get_anchors(self):\n \"\"\"\n loads the anchors from a txt file\n \"\"\"\n with open(self.anchors_path) as f:\n anchors = f.readline()\n anchors = [float(x) 
for x in anchors.split(',')]\n # (N, 2), wh\n return np.array(anchors).reshape(-1, 2)\n\n def num_classes(self):\n \"\"\"\n Number of classes in the dataset.\n \"\"\"\n raise NotImplementedError('num_classes method not implemented')\n # NOTE: ADDED\n def num_colors(self):\n \"\"\"\n Number of classes in the dataset.\n \"\"\"\n raise NotImplementedError('num_classes method not implemented')\n\n\n def has_label(self, label):\n \"\"\"\n Returns True if label is a known label.\n \"\"\"\n raise NotImplementedError('has_label method not implemented')\n\n def has_name(self, name):\n \"\"\"\n Returns True if name is a known class.\n \"\"\"\n raise NotImplementedError('has_name method not implemented')\n\n def name_to_label(self, name):\n \"\"\"\n Map name to label.\n \"\"\"\n raise NotImplementedError('name_to_label method not implemented')\n\n def label_to_name(self, label):\n \"\"\"\n Map label to name.\n \"\"\"\n raise NotImplementedError('label_to_name method not implemented')\n\n def image_aspect_ratio(self, image_index):\n \"\"\"\n Compute the aspect ratio for an image with image_index.\n \"\"\"\n raise NotImplementedError('image_aspect_ratio method not implemented')\n\n def load_image(self, image_index):\n \"\"\"\n Load an image at the image_index.\n \"\"\"\n raise NotImplementedError('load_image method not implemented')\n\n def load_annotations(self, image_index):\n \"\"\"\n Load annotations for an image_index.\n \"\"\"\n raise NotImplementedError('load_annotations method not implemented')\n\n def load_annotations_group(self, group):\n \"\"\"\n Load annotations for all images in group.\n \"\"\"\n annotations_group = [self.load_annotations(image_index) for image_index in group]\n for annotations in annotations_group:\n assert (isinstance(annotations,\n dict)), '\\'load_annotations\\' should return a list of dictionaries, received: {}'.format(\n type(annotations))\n assert (\n 'labels' in annotations), '\\'load_annotations\\' should return a list of dictionaries that 
contain \\'labels\\' and \\'bboxes\\'.'\n assert (\n 'bboxes' in annotations), '\\'load_annotations\\' should return a list of dictionaries that contain \\'labels\\' and \\'bboxes\\'.'\n\n return annotations_group\n\n def filter_annotations(self, image_group, annotations_group, group):\n \"\"\"\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n \"\"\"\n # test all annotations\n for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):\n # test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]\n invalid_indices = np.where(\n (annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |\n (annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |\n (annotations['bboxes'][:, 0] < 0) |\n (annotations['bboxes'][:, 1] < 0) |\n (annotations['bboxes'][:, 2] <= 0) |\n (annotations['bboxes'][:, 3] <= 0) |\n (annotations['bboxes'][:, 2] > image.shape[1]) |\n (annotations['bboxes'][:, 3] > image.shape[0])\n )[0]\n\n # delete invalid indices\n if len(invalid_indices):\n warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(\n group[index],\n image.shape,\n annotations['bboxes'][invalid_indices, :]\n ))\n for k in annotations_group[index].keys():\n annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)\n # if annotations['bboxes'].shape[0] == 0:\n # warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(\n # group[index],\n # image.shape,\n # ))\n return image_group, annotations_group\n\n def clip_transformed_annotations(self, image_group, annotations_group, group):\n \"\"\"\n Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.\n \"\"\"\n # test all annotations\n filtered_image_group = []\n filtered_annotations_group = []\n for index, (image, annotations) in enumerate(zip(image_group, 
annotations_group)):\n image_height = image.shape[0]\n image_width = image.shape[1]\n # x1\n annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)\n # y1\n annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)\n # x2\n annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)\n # y2\n annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)\n # test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]\n small_indices = np.where(\n (annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 3) |\n (annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 3)\n )[0]\n\n # delete invalid indices\n if len(small_indices):\n for k in annotations_group[index].keys():\n annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)\n # import cv2\n # for invalid_index in small_indices:\n # x1, y1, x2, y2 = annotations['bboxes'][invalid_index]\n # label = annotations['labels'][invalid_index]\n # class_name = self.labels[label]\n # print('width: {}'.format(x2 - x1))\n # print('height: {}'.format(y2 - y1))\n # cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)\n # cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)\n # cv2.namedWindow('image', cv2.WINDOW_NORMAL)\n # cv2.imshow('image', image)\n # cv2.waitKey(0)\n filtered_image_group.append(image)\n filtered_annotations_group.append(annotations_group[index])\n\n return filtered_image_group, filtered_annotations_group\n\n def load_image_group(self, group):\n \"\"\"\n Load images for all images in a group.\n \"\"\"\n return [self.load_image(image_index) for image_index in group]\n\n def random_visual_effect_group_entry(self, image, annotations):\n \"\"\"\n Randomly transforms image and annotation.\n \"\"\"\n # apply visual 
effect\n image = self.visual_effect(image)\n return image, annotations\n\n def random_visual_effect_group(self, image_group, annotations_group):\n \"\"\"\n Randomly apply visual effect on each image.\n \"\"\"\n assert (len(image_group) == len(annotations_group))\n\n if self.visual_effect is None:\n # do nothing\n return image_group, annotations_group\n\n for index in range(len(image_group)):\n # apply effect on a single group entry\n image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(\n image_group[index], annotations_group[index]\n )\n\n return image_group, annotations_group\n\n def random_misc_group_entry(self, image, annotations):\n \"\"\"\n Randomly transforms image and annotation.\n \"\"\"\n # randomly transform both image and annotations\n image, annotations = self.misc_effect(image, annotations)\n return image, annotations\n\n def random_misc_group(self, image_group, annotations_group):\n \"\"\"\n Randomly transforms each image and its annotations.\n \"\"\"\n\n assert (len(image_group) == len(annotations_group))\n\n if self.misc_effect is None:\n return image_group, annotations_group\n\n for index in range(len(image_group)):\n # transform a single group entry\n image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],\n annotations_group[index])\n\n return image_group, annotations_group\n\n def preprocess_group_entry(self, image, annotations):\n \"\"\"\n Preprocess image and its annotations.\n \"\"\"\n\n # preprocess the image\n image, scale = self.preprocess_image(image)\n\n # apply resizing to annotations too\n annotations['bboxes'] *= scale\n if self.detect_quadrangle:\n annotations['quadrangles'] *= scale\n return image, annotations\n\n def preprocess_group(self, image_group, annotations_group):\n \"\"\"\n Preprocess each image and its annotations in its group.\n \"\"\"\n assert (len(image_group) == len(annotations_group))\n\n for index in range(len(image_group)):\n # preprocess a 
single group entry\n image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],\n annotations_group[index])\n\n return image_group, annotations_group\n\n def group_images(self):\n \"\"\"\n Order the images according to self.order and makes groups of self.batch_size.\n \"\"\"\n # determine the order of the images\n\n order = list(range(self.size()))\n if self.group_method == 'random':\n random.shuffle(order)\n elif self.group_method == 'ratio':\n order.sort(key=lambda x: self.image_aspect_ratio(x))\n\n # divide into groups, one group = one batch\n self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in\n range(0, len(order), self.batch_size)]\n\n def compute_inputs(self, image_group, annotations_group):\n \"\"\"\n Compute inputs for the network using an image_group.\n \"\"\"\n batch_images = np.array(image_group).astype(np.float32)\n return [batch_images]\n\n def compute_alphas_and_ratios(self, annotations_group):\n for i, annotations in enumerate(annotations_group):\n quadrangles = annotations['quadrangles']\n alphas = np.zeros((quadrangles.shape[0], 4), dtype=np.float32)\n xmin = np.min(quadrangles, axis=1)[:, 0]\n ymin = np.min(quadrangles, axis=1)[:, 1]\n xmax = np.max(quadrangles, axis=1)[:, 0]\n ymax = np.max(quadrangles, axis=1)[:, 1]\n # alpha1, alpha2, alpha3, alpha4\n alphas[:, 0] = (quadrangles[:, 0, 0] - xmin) / (xmax - xmin)\n alphas[:, 1] = (quadrangles[:, 1, 1] - ymin) / (ymax - ymin)\n alphas[:, 2] = (xmax - quadrangles[:, 2, 0]) / (xmax - xmin)\n alphas[:, 3] = (ymax - quadrangles[:, 3, 1]) / (ymax - ymin)\n annotations['alphas'] = alphas\n # ratio\n area1 = 0.5 * alphas[:, 0] * (1 - alphas[:, 3])\n area2 = 0.5 * alphas[:, 1] * (1 - alphas[:, 0])\n area3 = 0.5 * alphas[:, 2] * (1 - alphas[:, 1])\n area4 = 0.5 * alphas[:, 3] * (1 - alphas[:, 2])\n annotations['ratios'] = 1 - area1 - area2 - area3 - area4\n\n def compute_targets(self, image_group, annotations_group):\n \"\"\"\n Compute 
target outputs for the network using images and their annotations.\n \"\"\"\n \"\"\"\n Compute target outputs for the network using images and their annotations.\n \"\"\"\n\n batches_targets = anchor_targets_bbox(\n self.anchors,\n image_group,\n annotations_group,\n num_classes=self.num_classes(),\n num_colors=self.num_colors(),\n detect_quadrangle=self.detect_quadrangle\n )\n return list(batches_targets)\n\n def compute_inputs_targets(self, group, debug=False):\n \"\"\"\n Compute inputs and target outputs for the network.\n \"\"\"\n\n # load images and annotations\n # list\n image_group = self.load_image_group(group)\n annotations_group = self.load_annotations_group(group)\n\n # check validity of annotations\n image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)\n\n # randomly apply visual effect\n image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)\n\n # randomly transform data\n # image_group, annotations_group = self.random_transform_group(image_group, annotations_group)\n\n # randomly apply misc effect\n image_group, annotations_group = self.random_misc_group(image_group, annotations_group)\n\n # perform preprocessing steps\n image_group, annotations_group = self.preprocess_group(image_group, annotations_group)\n\n # check validity of annotations\n image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)\n\n assert len(image_group) != 0\n assert len(image_group) == len(annotations_group)\n\n if self.detect_quadrangle:\n # compute alphas and ratio for targets\n self.compute_alphas_and_ratios(annotations_group)\n\n # compute network inputs\n inputs = self.compute_inputs(image_group, annotations_group)\n\n # compute network targets\n targets = self.compute_targets(image_group, annotations_group)\n\n if debug:\n return inputs, targets, annotations_group\n\n return inputs, targets\n\n def __len__(self):\n \"\"\"\n Number of 
batches for generator.\n \"\"\"\n\n return len(self.groups)\n\n def __getitem__(self, index):\n \"\"\"\n Keras sequence method for generating batches.\n \"\"\"\n group = self.groups[index]\n inputs, targets = self.compute_inputs_targets(group)\n return inputs, targets\n\n def preprocess_image(self, image):\n # image, RGB\n image_height, image_width = image.shape[:2]\n if image_height > image_width:\n scale = self.image_size / image_height\n resized_height = self.image_size\n resized_width = int(image_width * scale)\n else:\n scale = self.image_size / image_width\n resized_height = int(image_height * scale)\n resized_width = self.image_size\n\n image = cv2.resize(image, (resized_width, resized_height))\n image = image.astype(np.float32)\n image /= 255.\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n image -= mean\n image /= std\n pad_h = self.image_size - resized_height\n pad_w = self.image_size - resized_width\n image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')\n return image, scale\n\n def get_augmented_data(self, group):\n \"\"\"\n Compute inputs and target outputs for the network.\n \"\"\"\n\n # load images and annotations\n # list\n image_group = self.load_image_group(group)\n annotations_group = self.load_annotations_group(group)\n\n # check validity of annotations\n image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)\n\n # randomly apply visual effect\n # image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)\n\n # randomly transform data\n # image_group, annotations_group = self.random_transform_group(image_group, annotations_group)\n\n # randomly apply misc effect\n # image_group, annotations_group = self.random_misc_group(image_group, annotations_group)\n\n # perform preprocessing steps\n image_group, annotations_group = self.preprocess_group(image_group, annotations_group)\n\n # check validity of annotations\n image_group, 
annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)\n\n assert len(image_group) != 0\n assert len(image_group) == len(annotations_group)\n\n # compute alphas for targets\n self.compute_alphas_and_ratios(annotations_group)\n\n return image_group, annotations_group\n"
] | [
[
"numpy.pad",
"numpy.clip",
"numpy.min",
"numpy.max",
"numpy.delete",
"numpy.array",
"numpy.where",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
t170815518/AdvancedEAST | [
"744e4fc7243e61cb4fad510e7705678013593151"
] | [
"losses.py"
] | [
"import tensorflow as tf\n\nimport cfg\n\n\ndef quad_loss(y_true, y_pred):\n # loss for inside_score\n logits = y_pred[:, :, :, :1]\n labels = y_true[:, :, :, :1]\n # balance positive and negative samples in an image\n beta = 1 - tf.reduce_mean(labels)\n # first apply sigmoid activation\n predicts = tf.nn.sigmoid(logits)\n # log +epsilon for stable cal\n inside_score_loss = tf.reduce_mean(\n -1 * (beta * labels * tf.math.log(predicts + cfg.epsilon) +\n (1 - beta) * (1 - labels) * tf.math.log(1 - predicts + cfg.epsilon)))\n inside_score_loss *= cfg.lambda_inside_score_loss\n\n # loss for side_vertex_code\n vertex_logits = y_pred[:, :, :, 1:3]\n vertex_labels = y_true[:, :, :, 1:3]\n vertex_beta = 1 - (tf.reduce_mean(y_true[:, :, :, 1:2])\n / (tf.reduce_mean(labels) + cfg.epsilon))\n vertex_predicts = tf.nn.sigmoid(vertex_logits)\n pos = -1 * vertex_beta * vertex_labels * tf.math.log(vertex_predicts +\n cfg.epsilon)\n neg = -1 * (1 - vertex_beta) * (1 - vertex_labels) * tf.math.log(\n 1 - vertex_predicts + cfg.epsilon)\n positive_weights = tf.cast(tf.equal(y_true[:, :, :, 0], 1), tf.float32)\n side_vertex_code_loss = \\\n tf.reduce_sum(tf.reduce_sum(pos + neg, axis=-1) * positive_weights) / (\n tf.reduce_sum(positive_weights) + cfg.epsilon)\n side_vertex_code_loss *= cfg.lambda_side_vertex_code_loss\n\n # loss for side_vertex_coord delta\n g_hat = y_pred[:, :, :, 3:]\n g_true = y_true[:, :, :, 3:]\n vertex_weights = tf.cast(tf.equal(y_true[:, :, :, 1], 1), tf.float32)\n pixel_wise_smooth_l1norm = smooth_l1_loss(g_hat, g_true, vertex_weights)\n side_vertex_coord_loss = tf.reduce_sum(pixel_wise_smooth_l1norm) / (\n tf.reduce_sum(vertex_weights) + cfg.epsilon)\n side_vertex_coord_loss *= cfg.lambda_side_vertex_coord_loss\n return inside_score_loss + side_vertex_code_loss + side_vertex_coord_loss\n\n\ndef smooth_l1_loss(prediction_tensor, target_tensor, weights):\n n_q = tf.reshape(quad_norm(target_tensor), tf.shape(weights))\n diff = prediction_tensor - target_tensor\n 
abs_diff = tf.abs(diff)\n abs_diff_lt_1 = tf.less(abs_diff, 1)\n pixel_wise_smooth_l1norm = (tf.reduce_sum(\n tf.where(abs_diff_lt_1, 0.5 * tf.square(abs_diff), abs_diff - 0.5),\n axis=-1) / n_q) * weights\n return pixel_wise_smooth_l1norm\n\n\ndef quad_norm(g_true):\n shape = tf.shape(g_true)\n delta_xy_matrix = tf.reshape(g_true, [-1, 2, 2])\n diff = delta_xy_matrix[:, 0:1, :] - delta_xy_matrix[:, 1:2, :]\n square = tf.square(diff)\n distance = tf.sqrt(tf.reduce_sum(square, axis=-1))\n distance *= 4.0\n distance += cfg.epsilon\n return tf.reshape(distance, shape[:-1])\n"
] | [
[
"tensorflow.nn.sigmoid",
"tensorflow.shape",
"tensorflow.less",
"tensorflow.reduce_mean",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.equal",
"tensorflow.math.log",
"tensorflow.square",
"tensorflow.abs"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
BUPTlfq/OpenHGNN | [
"77041e68c33a8a42a2c187c6e42d85b81cbb25d3"
] | [
"openhgnn/models/HGNN_AC.py"
] | [
"import torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport numpy as np\r\nfrom . import BaseModel, register_model\r\n\r\n@register_model('HGNN_AC')\r\nclass HGNN_AC(BaseModel):\r\n r\"\"\"\r\n Desctiption\r\n -----------\r\n HGNN_AC was introduced in `HGNN_AC <https://dl.acm.org/doi/10.1145/3442381.3449914>`__.\r\n \r\n It included four parts:\r\n\r\n - Pre-learning of Topological Embedding\r\n HGNN-AC first obtains more comprehensive node sequences by random walk according to the frequently used multiple meta-paths, \r\n and then feeds these sequences to the skip-gram model to learn node embeddings :math:`H`.\r\n \r\n - Attribute Completion with Attention Mechanism\r\n HGNN-AC adopts a masked attention mechanism which means we only calculate :math:`e_{vu}` for nodes :math:`u\\in{N_v^+}`, \r\n where :math:`u\\in{N_v^+}` denotes the first-order neighbors of node :math:`v` \r\n in set :math:`V^+`, where :math:`V^+` is the set of nodes with attributes.\r\n \r\n .. math::\r\n e_{vu}=\\sigma(h_v^{T}Wh_u)\r\n \r\n where :math:`W` is the parametric matrix, and :math:`\\sigma` an activation function.\r\n \r\n Then, softmax function is applied to get normalized weighted coefficient :math:`a_{vu}`\r\n\r\n .. math::\r\n a_{vu}=softmax(e_{vu})=\\frac{exp(e_{vu})}{\\sum_{s\\in{N_v^+}}{exp(e_{vs})}}\r\n\r\n HGNN-AC can perform weighted aggregation of attributes\r\n for node :math:`v` according to weighted coefficient :math:`a_{vu}` :\r\n\r\n .. math::\r\n X_v^C=\\sum_{u\\in{N_v^+}}{a_{vu}x_u}\r\n\r\n where :math:`N_v^+` denotes the set of neighbors of node :math:`v\\in{V^+}`,\r\n and :math:`x_u` denotes the attributes of nodes :math:`u`.\r\n\r\n .. _here:\r\n \r\n Specially, the attention process is extended to a multi-head attention\r\n to stabilize the learning process and reduce the high variance\r\n\r\n .. 
math::\r\n X_v^C=mean(\\sum_k^K {\\sum_{u\\in{N_v^+}}{a_{vu}x_u}})\r\n\r\n where :math:`K` means that we perform :math:`K` independent attention process.\r\n\r\n - Dropping some Attributes\r\n To be specific, for nodes in :math:`V^+`, HGNN-AC randomly divides them into two parts\r\n :math:`V_{drop}^+` and :math:`V_{keep}^+` according to a small ratio :math:`\\alpha`, i.e. :math:`|V_{drop}^+|=\\alpha|V^+|`.\r\n HGNN-AC first drops attributes of nodes in :math:`V_{drop}^+` and then \r\n reconstructs these attributes via attributes of nodes :math:`V_{drop}^+` by conducting\r\n attribute completion.\r\n \r\n .. math::\r\n X_v^C=mean(\\sum_k^K {\\sum_{u\\in{V_{keep}^+ \\cap V_i^+}}{a_{vu}x_u}})\r\n\r\n It introduced a weakly supervised loss to optimize the parameters of attribute completion \r\n and use euclidean distance as the metric to design the loss function as:\r\n \r\n .. math::\r\n L_{completion}=\\frac{1}{|V_{drop}^+|}\\sum_{i \\in V_{drop}^+} \\sqrt{(X_i^C-X_i)^2}\r\n \r\n - Combination with HIN Model\r\n Now, we have completed attributes nodes in :math:`V^-`(the set of nodes without attribute), and the raw attributes nodes in :math:`V+`, \r\n Wthen the new attributes of all nodes are defined as:\r\n\r\n .. math::\r\n X^{new}=\\{X_i^C,X_j|\\forall i \\in V^-, \\forall j \\in V^+\\}\r\n\r\n the new attributes :math:`X^{new}`, together with network topology :math:`A`, as\r\n a new graph, are sent to the HIN model:\r\n\r\n .. math::\r\n \\overline{Y}=\\Phi(A,X^{new})\r\n L_{prediction}=f(\\overline{Y},Y)\r\n \r\n where :math:`\\Phi` denotes an arbitrary HINs model.\r\n\r\n the overall model can be optimized via back propagation in an end-to-end\r\n manner:\r\n\r\n .. 
math::\r\n L=\\lambda L_{completion}+L_{prediction}\r\n \r\n where :math:`\\lambda` is a weighted coefficient to balance these two parts.\r\n \r\n Parameters\r\n ----------\r\n in_dim: int\r\n nodes' topological embedding dimension\r\n hidden_dim: int\r\n hidden dimension \r\n dropout: float\r\n the dropout rate of neighbor nodes dropout\r\n activation: callable activation function\r\n the activation function used in HGNN_AC. default: ``F.elu``\r\n num_heads: int\r\n the number of heads in attribute completion with attention mechanism\r\n \"\"\"\r\n @classmethod\r\n def build_model_from_args(cls, args, hg):\r\n return cls(in_dim = hg.nodes[hg.ntypes[0]].data['emb'].shape[1], \r\n hidden_dim = args.attn_vec_dim, \r\n dropout = args.dropout, activation = F.elu, \r\n num_heads = args.num_heads,\r\n cuda = False if args.device == torch.device('cpu') else True)\r\n def __init__(self, in_dim, hidden_dim, dropout, activation, num_heads, cuda):\r\n super(HGNN_AC, self).__init__()\r\n self.dropout = dropout\r\n self.attentions = [AttentionLayer(in_dim, hidden_dim, dropout, activation, cuda) for _ in range(num_heads)]\r\n\r\n for i, attention in enumerate(self.attentions):\r\n self.add_module('attention_{}'.format(i), attention)\r\n\r\n def forward(self, bias, emb_dest, emb_src, feature_src):\r\n r\"\"\"\r\n Description\r\n -----------\r\n This is the forward part of model HGNN_AC\r\n\r\n Parameters\r\n ----------\r\n bias: matrix\r\n adjacency matrix related to the source nodes\r\n emb_dest: matrix\r\n embeddings of the destination node\r\n emb_src: matrix\r\n embeddings of the source node\r\n feature_src: matrix\r\n features of the source node\r\n \r\n Returns\r\n -------\r\n features: matrix\r\n the new features of the type of node\r\n \"\"\"\r\n \r\n #Attribute Completion with Attention Mechanism\r\n adj = F.dropout(bias, self.dropout, training=self.training)\r\n #x = sum_k(x_v)\r\n x = torch.cat([att(adj, emb_dest, emb_src, feature_src).unsqueeze(0) for att in 
self.attentions], dim=0)\r\n\r\n #X_{v}^{C} = mean(x)\r\n return torch.mean(x, dim=0, keepdim=False)\r\n\r\n\r\nclass AttentionLayer(nn.Module):\r\n r\"\"\"\r\n Description\r\n -------------------\r\n This is the attention process used in HGNN\\_AC. For more details, you can check here_.\r\n \r\n Parameters\r\n -------------------\r\n in_dim: int\r\n nodes' topological embedding dimension\r\n hidden_dim: int\r\n hidden dimension\r\n dropout: float\r\n the drop rate used in the attention\r\n activation: callable activation function\r\n the activation function used in HGNN_AC. default: ``F.elu``\r\n \"\"\"\r\n def __init__(self, in_dim, hidden_dim, dropout, activation, cuda=False):\r\n super(AttentionLayer, self).__init__()\r\n self.dropout = dropout\r\n self.activation = activation\r\n self.is_cuda = cuda\r\n\r\n self.W = nn.Parameter(nn.init.xavier_normal_(\r\n torch.Tensor(in_dim, hidden_dim).type(torch.cuda.FloatTensor if cuda else torch.FloatTensor),\r\n gain=np.sqrt(2.0)), requires_grad=True)\r\n self.W2 = nn.Parameter(nn.init.xavier_normal_(torch.Tensor(hidden_dim, hidden_dim).type(\r\n torch.cuda.FloatTensor if cuda else torch.FloatTensor), gain=np.sqrt(2.0)),\r\n requires_grad=True)\r\n\r\n self.leakyrelu = nn.LeakyReLU(0.2)\r\n\r\n def forward(self, bias, emb_dest, emb_src, feature_src):\r\n r\"\"\"\r\n Description\r\n ----------------\r\n This is the forward part of the attention process.\r\n \r\n Parameters\r\n --------------\r\n bias: matrix\r\n the processed adjacency matrix related to the source nodes\r\n emb_dest: matrix\r\n the embeddings of the destination nodes\r\n emb_src: matrix\r\n the embeddings of the source nodes\r\n feature_src: matrix\r\n the features of the source nodes\r\n \r\n Returns\r\n ------------\r\n features: matrix\r\n the new features of the nodes\r\n \"\"\"\r\n h_1 = torch.mm(emb_src, self.W)\r\n h_2 = torch.mm(emb_dest, self.W)\r\n\r\n #contribution of the neighbor nodes using a masked attention\r\n #e_{vu} = activation(h_v * W 
* h_u)\r\n e = self.leakyrelu(torch.mm(torch.mm(h_2, self.W2), h_1.t()))\r\n zero_vec = -9e15 * torch.ones_like(e)\r\n attention = torch.where(bias > 0, e, zero_vec)\r\n \r\n #get normalized weighted coefficient\r\n #a_{vu} = softmax(e_{vu})\r\n attention = F.softmax(attention, dim=1)\r\n attention = F.dropout(attention, self.dropout, training=self.training)\r\n #x_v = sum(a_{vu} * x_u)\r\n h_prime = torch.matmul(attention, feature_src)\r\n\r\n #return a new attribute\r\n return self.activation(h_prime)\r\n"
] | [
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.mm",
"numpy.sqrt",
"torch.Tensor",
"torch.nn.functional.dropout",
"torch.matmul",
"torch.nn.LeakyReLU",
"torch.where",
"torch.device",
"torch.ones_like"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
LeapLabTHU/Pseudo-Q | [
"6919e47d18bae6eeca741998816c28647593faef"
] | [
"utils/loss_utils.py"
] | [
"import torch\nimport numpy as np\nimport torch.nn.functional as F\n\nfrom utils.box_utils import bbox_iou, xywh2xyxy, xyxy2xywh, generalized_box_iou\nfrom utils.misc import get_world_size\n\n\ndef build_target(args, gt_bbox, pred, device):\n batch_size = gt_bbox.size(0)\n num_scales = len(pred)\n coord_list, bbox_list = [], []\n for scale_ii in range(num_scales):\n this_stride = 32 // (2 ** scale_ii)\n grid = args.size // this_stride\n # Convert [x1, y1, x2, y2] to [x_c, y_c, w, h]\n center_x = (gt_bbox[:, 0] + gt_bbox[:, 2]) / 2\n center_y = (gt_bbox[:, 1] + gt_bbox[:, 3]) / 2\n box_w = gt_bbox[:, 2] - gt_bbox[:, 0]\n box_h = gt_bbox[:, 3] - gt_bbox[:, 1]\n coord = torch.stack((center_x, center_y, box_w, box_h), dim=1)\n # Normalized by the image size\n coord = coord / args.size\n coord = coord * grid\n coord_list.append(coord)\n bbox_list.append(torch.zeros(coord.size(0), 3, 5, grid, grid))\n\n best_n_list, best_gi, best_gj = [], [], []\n for ii in range(batch_size):\n anch_ious = []\n for scale_ii in range(num_scales):\n this_stride = 32 // (2 ** scale_ii)\n grid = args.size // this_stride\n gw = coord_list[scale_ii][ii, 2]\n gh = coord_list[scale_ii][ii, 3]\n\n anchor_idxs = [x + 3 * scale_ii for x in [0, 1, 2]]\n anchors = [args.anchors_full[i] for i in anchor_idxs]\n scaled_anchors = [(x[0] / (args.anchor_imsize / grid),\n x[1] / (args.anchor_imsize / grid)) for x in anchors]\n\n gt_box = torch.from_numpy(np.array([0, 0, gw.cpu().numpy(), gh.cpu().numpy()])).float().unsqueeze(0)\n ## Get shape of anchor box\n anchor_shapes = torch.FloatTensor(\n np.concatenate((np.zeros((len(scaled_anchors), 2)), np.array(scaled_anchors)), 1))\n\n ## Calculate iou between gt and anchor shapes\n anch_ious += list(bbox_iou(gt_box, anchor_shapes))\n ## Find the best matching anchor box\n best_n = np.argmax(np.array(anch_ious))\n best_scale = best_n // 3\n\n best_grid = args.size // (32 / (2 ** best_scale))\n anchor_idxs = [x + 3 * best_scale for x in [0, 1, 2]]\n anchors = 
[args.anchors_full[i] for i in anchor_idxs]\n scaled_anchors = [(x[0] / (args.anchor_imsize / best_grid), \\\n x[1] / (args.anchor_imsize / best_grid)) for x in anchors]\n\n gi = coord_list[best_scale][ii, 0].long()\n gj = coord_list[best_scale][ii, 1].long()\n tx = coord_list[best_scale][ii, 0] - gi.float()\n ty = coord_list[best_scale][ii, 1] - gj.float()\n gw = coord_list[best_scale][ii, 2]\n gh = coord_list[best_scale][ii, 3]\n tw = torch.log(gw / scaled_anchors[best_n % 3][0] + 1e-16)\n th = torch.log(gh / scaled_anchors[best_n % 3][1] + 1e-16)\n\n bbox_list[best_scale][ii, best_n % 3, :, gj, gi] = torch.stack(\n [tx, ty, tw, th, torch.ones(1).to(device).squeeze()])\n best_n_list.append(int(best_n))\n best_gi.append(gi)\n best_gj.append(gj)\n\n for ii in range(len(bbox_list)):\n bbox_list[ii] = bbox_list[ii].to(device)\n return bbox_list, best_gi, best_gj, best_n_list\n\n\ndef yolo_loss(pred_list, target, gi, gj, best_n_list, device, w_coord=5., w_neg=1. / 5, size_average=True):\n mseloss = torch.nn.MSELoss(size_average=True)\n celoss = torch.nn.CrossEntropyLoss(size_average=True)\n num_scale = len(pred_list)\n batch_size = pred_list[0].size(0)\n\n pred_bbox = torch.zeros(batch_size, 4).to(device)\n gt_bbox = torch.zeros(batch_size, 4).to(device)\n for ii in range(batch_size):\n pred_bbox[ii, 0:2] = torch.sigmoid(\n pred_list[best_n_list[ii] // 3][ii, best_n_list[ii] % 3, 0:2, gj[ii], gi[ii]])\n pred_bbox[ii, 2:4] = pred_list[best_n_list[ii] // 3][ii, best_n_list[ii] % 3, 2:4, gj[ii], gi[ii]]\n gt_bbox[ii, :] = target[best_n_list[ii] // 3][ii, best_n_list[ii] % 3, :4, gj[ii], gi[ii]]\n loss_x = mseloss(pred_bbox[:, 0], gt_bbox[:, 0])\n loss_y = mseloss(pred_bbox[:, 1], gt_bbox[:, 1])\n loss_w = mseloss(pred_bbox[:, 2], gt_bbox[:, 2])\n loss_h = mseloss(pred_bbox[:, 3], gt_bbox[:, 3])\n\n pred_conf_list, gt_conf_list = [], []\n for scale_ii in range(num_scale):\n pred_conf_list.append(pred_list[scale_ii][:, :, 4, :, :].contiguous().view(batch_size, -1))\n 
gt_conf_list.append(target[scale_ii][:, :, 4, :, :].contiguous().view(batch_size, -1))\n pred_conf = torch.cat(pred_conf_list, dim=1)\n gt_conf = torch.cat(gt_conf_list, dim=1)\n loss_conf = celoss(pred_conf, gt_conf.max(1)[1])\n return (loss_x + loss_y + loss_w + loss_h) * w_coord + loss_conf\n\n\ndef trans_vg_loss(batch_pred, batch_target):\n \"\"\"Compute the losses related to the bounding boxes, \n including the L1 regression loss and the GIoU loss\n \"\"\"\n batch_size = batch_pred.shape[0]\n # world_size = get_world_size()\n num_boxes = batch_size\n\n loss_bbox = F.l1_loss(batch_pred, batch_target, reduction='none')\n loss_giou = 1 - torch.diag(generalized_box_iou(\n xywh2xyxy(batch_pred),\n xywh2xyxy(batch_target)\n ))\n\n losses = {}\n losses['loss_bbox'] = loss_bbox.sum() / num_boxes\n losses['loss_giou'] = loss_giou.sum() / num_boxes\n\n return losses\n"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.sigmoid",
"torch.ones",
"torch.nn.functional.l1_loss",
"torch.cat",
"torch.zeros",
"torch.log",
"torch.stack",
"numpy.array",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gjsun/ares | [
"5a85b8ba79c95fe5f6363e8b23c99bf750b08b70"
] | [
"ares/analysis/GalaxyPopulation.py"
] | [
"\"\"\"\n\nGalaxyPopulation.py\n\nAuthor: Jordan Mirocha\nAffiliation: UCLA\nCreated on: Thu Jan 28 12:38:11 PST 2016\n\nDescription: \n\n\"\"\"\n\nimport time\nimport numpy as np\nfrom ..util import labels\nfrom matplotlib import cm\nimport matplotlib.pyplot as pl\nfrom .ModelSet import ModelSet\nfrom ..util.Survey import Survey\nfrom ..phenom import DustCorrection\nfrom matplotlib.patches import Patch\nfrom ..util.ReadData import read_lit\nfrom ..util.Aesthetics import labels\nfrom scipy.optimize import curve_fit\nimport matplotlib.gridspec as gridspec\nfrom ..util.ProgressBar import ProgressBar\nfrom ..util.Photometry import what_filters\nfrom matplotlib.colors import ListedColormap\nfrom .MultiPlot import MultiPanel, add_master_legend\nfrom ..physics.Constants import rhodot_cgs, cm_per_pc\nfrom ..util.Stats import symmetrize_errors, bin_samples\nfrom ..populations.GalaxyPopulation import GalaxyPopulation as GP\nfrom ..populations.GalaxyEnsemble import GalaxyEnsemble\n\ntry:\n # this runs with no issues in python 2 but raises error in python 3\n basestring\nexcept:\n # this try/except allows for python 2/3 compatible string type checking\n basestring = str\n\ndatasets_lf = ('bouwens2015', 'finkelstein2015', 'bowler2020', 'stefanon2019', \n 'mclure2013', 'parsa2016', 'atek2015', 'alavi2016', \n 'reddy2009', 'weisz2014', 'bouwens2017', 'oesch2018', 'oesch2013', \n 'oesch2014', 'vanderburg2010', 'morishita2018', 'rojasruiz2020')\ndatasets_smf = ('song2016', 'stefanon2017', 'duncan2014', 'tomczak2014')\ndatasets_mzr = ('sanders2015',)\n\ngroups_lf = \\\n{\n 'dropouts': ('parsa2016', 'bouwens2015',\n 'finkelstein2015', 'bowler2020','stefanon2019', 'mclure2013',\n 'vanderburg2010', 'reddy2009', 'oesch2018', 'oesch2013', 'oesch2014',\n 'morishita2018', 'rojasruiz2020'),\n 'lensing': ('alavi2016', 'atek2015', 'bouwens2017'),\n 'local': ('weisz2014,'),\n 'all': datasets_lf,\n}\n\ngroups_smf = {'all': datasets_smf}\ngroups = {'lf': groups_lf, 'smf': groups_smf, 'smf_sf': 
groups_smf, \n 'smf_tot': groups_smf, \n 'mzr': {'all': datasets_mzr}}\n\ncolors_cyc = ['m', 'c', 'r', 'g', 'b', 'y', 'orange', 'gray'] * 3\nmarkers = ['o', 's', 'p', 'h', 'D', 'd', '^', 'v', '<', '>'] * 3\n \ndefault_colors = {}\ndefault_markers = {} \nfor i, dataset in enumerate(datasets_lf):\n default_colors[dataset] = colors_cyc[i]\n default_markers[dataset] = markers[i]\n\nfor i, dataset in enumerate(datasets_smf):\n default_colors[dataset] = colors_cyc[i]\n default_markers[dataset] = markers[i]\n\nfor i, dataset in enumerate(datasets_mzr):\n default_colors[dataset] = colors_cyc[i]\n default_markers[dataset] = markers[i] \n \ndefault_markers['stefanon2017'] = 's'\n\n_ulim_tick = 0.5\n\nclass GalaxyPopulation(object):\n def __init__(self):\n pass\n\n def compile_data(self, redshift, sources='all', round_z=False,\n quantity='lf', sources_except=[], just_above=True):\n \"\"\"\n Create a master dictionary containing the MUV points, phi points,\n and (possibly asymmetric) errorbars for all (or some) data available.\n \n .. 
note:: Since we store errorbars in +/- order (if asymmetric), but\n matplotlib.pyplot.errorbar assumes -/+ order, we swap the order here.\n \n Parameters\n ----------\n z : int, float\n Redshift, dummy!\n\n \"\"\"\n \n data = {}\n \n if isinstance(sources, basestring):\n if sources in groups[quantity]:\n if sources == 'all':\n srcs = []\n for src in groups[quantity]['all']:\n if src in sources_except:\n continue\n srcs.append(src)\n else: \n srcs = groups[quantity][sources]\n else:\n srcs = [sources]\n else:\n srcs = sources\n \n for source in srcs:\n src = read_lit(source)\n \n if redshift not in src.redshifts and (not round_z):\n print(\"No z={0:g} data in {1!s}.\".format(redshift, source))\n continue\n \n if redshift not in src.redshifts:\n i_close = np.argmin(np.abs(redshift - np.array(src.redshifts)))\n if abs(src.redshifts[i_close] - redshift) <= round_z:\n z = src.redshifts[i_close]\n else:\n continue\n \n else: \n z = redshift\n \n if quantity not in src.data:\n continue \n \n data[source] = {}\n \n if 'label' in src.info:\n data[source]['label'] = src.info['label']\n \n if quantity in ['lf']:\n data[source]['wavelength'] = src.wavelength \n \n M = src.data[quantity][z]['M'] \n if hasattr(M, 'data'):\n data[source]['M'] = M.data\n mask = M.mask\n else:\n data[source]['M'] = np.array(M)\n mask = np.zeros_like(data[source]['M'])\n \n if src.units[quantity] == 'log10':\n err_lo = []; err_hi = []; uplims = []; err_mask = []\n for i, err in enumerate(src.data[quantity][z]['err']):\n \n \n if type(err) not in [int, float]:\n err = np.mean(err)\n \n logphi_ML = src.data[quantity][z]['phi'][i]\n \n logphi_lo_tmp = logphi_ML - err # log10 phi\n logphi_hi_tmp = logphi_ML + err # log10 phi\n \n phi_lo = 10**logphi_lo_tmp\n phi_hi = 10**logphi_hi_tmp\n \n err1 = 10**logphi_ML - phi_lo\n err2 = phi_hi - 10**logphi_ML\n \n if (err < 0):\n err_hi.append(0.0)\n err_lo.append(_ulim_tick * 10**logphi_ML)\n else:\n err_lo.append(err1)\n err_hi.append(err2)\n \n 
uplims.append(err < 0) \n \n if np.ma.is_masked(err):\n err_mask.append(True) \n else:\n err_mask.append(False)\n \n data[source]['err'] = (err_lo, err_hi) \n if hasattr(src.data[quantity][z]['phi'], 'data'): \n data[source]['phi'] = \\\n np.ma.array(10**src.data[quantity][z]['phi'].data,\n mask=src.data[quantity][z]['phi'].mask)\n else:\n data[source]['phi'] = \\\n np.ma.array(10**np.array(src.data[quantity][z]['phi'].data),\n mask=src.data[quantity][z]['phi'].mask)\n \n data[source]['ulim'] = uplims\n else: \n \n if hasattr(src.data[quantity][z]['phi'], 'data'):\n data[source]['phi'] = \\\n np.ma.array(src.data[quantity][z]['phi'].data,\n mask=src.data[quantity][z]['phi'].mask)\n else:\n print(source)\n data[source]['phi'] = \\\n np.ma.array(src.data[quantity][z]['phi'].data,\n mask=src.data[quantity][z]['phi'])\n \n err_lo = []; err_hi = []; uplims = []; err_mask = []\n for i, err in enumerate(src.data[quantity][z]['err']):\n \n if type(err) in [list, tuple, np.ndarray]:\n err_hi.append(err[0])\n err_lo.append(err[1])\n uplims.append(False)\n err_mask.append(False)\n elif err is None:\n err_lo.append(0)\n err_hi.append(0)\n uplims.append(False)\n err_mask.append(True)\n else: \n if (err < 0):\n err_hi.append(0.0)\n err_lo.append(_ulim_tick * data[source]['phi'][i])\n else:\n err_hi.append(err)\n err_lo.append(err)\n \n uplims.append(err < 0)\n err_mask.append(err < 0)\n \n data[source]['ulim'] = np.array(uplims)\n \n err_lo = np.ma.array(err_lo, mask=err_mask)\n err_hi = np.ma.array(err_hi, mask=err_mask)\n mask2 = np.array([err_lo.mask==1, err_hi.mask==1])\n data[source]['err'] = np.ma.array((err_lo, err_hi), mask=mask2)\n \n data[source]['phi'] = np.ma.array(data[source]['phi'], mask=mask) \n data[source]['M'] = np.ma.array(data[source]['M'], mask=mask)\n \n return data\n \n def PlotLF(self, z, ax=None, fig=1, sources='all', round_z=False, \n AUV=None, wavelength=1600., sed_model=None, force_labels=False, **kwargs):\n \n return self.Plot(z=z, ax=ax, fig=fig, 
sources=sources, round_z=round_z,\n AUV=AUV, wavelength=1600, sed_model=None, quantity='lf', \n force_labels=force_labels, **kwargs) \n \n def PlotSMF(self, z, ax=None, fig=1, sources='all', round_z=False, \n AUV=None, wavelength=1600., sed_model=None, force_labels=False, **kwargs):\n \n return self.Plot(z=z, ax=ax, fig=fig, sources=sources, round_z=round_z,\n AUV=AUV, wavelength=1600, sed_model=None, quantity='smf', \n force_labels=force_labels, **kwargs) \n\n def PlotColors(self, pop, axes=None, fig=1, z_uvlf=[4,6,8,10],\n z_beta=[4,5,6,7], z_only=None, sources='all', repeat_z=True, beta_phot=True, \n show_Mstell=True, show_MUV=True, label=None, zcal=None, Mlim=-15,\n dmag=0.5, dlam_c94=10, fill=False, extra_pane=False, square=False,\n cmap=None, **kwargs):\n \"\"\"\n Make a nice plot showing UVLF and UV CMD constraints and models.\n \"\"\"\n \n num_uvlf_panels = 1\n if type(z_uvlf[0]) not in [int, float, np.int64, np.float64]:\n num_uvlf_panels = 2 \n assert not (show_Mstell and show_MUV)\n assert not square\n assert not extra_pane\n \n if axes is None:\n\n xp = extra_pane or num_uvlf_panels == 2\n\n if square:\n dims = (12, 12)\n nrows = 9\n ncols = 4\n hs = 0.1\n ws = 0.8\n\n assert not xp, \"Cannot add extra panel for square plot.\"\n assert show_Mstell, \"No point in square plot if only 2 panels.\"\n else:\n dims = (24, 6)\n nrows = 4\n ncols = 6 \\\n + 3 * int(num_uvlf_panels == 2) \\\n + 4 * extra_pane \\\n + 2 * (show_Mstell and show_MUV)\n \n hs = 0.1\n ws = 0.8\n \n if show_Mstell and show_MUV:\n fig = pl.figure(tight_layout=False, figsize=dims, num=fig)\n fig.subplots_adjust(left=0.1, right=0.9)\n gs = gridspec.GridSpec(nrows, ncols, hspace=hs, wspace=ws, \n figure=fig)\n ax_extra = None\n xp = 0 \n else:\n fig = pl.figure(tight_layout=False, figsize=(12+xp*6, 6), \n num=fig)\n fig.subplots_adjust(left=0.1 ,right=0.9)\n # nrows, ncols\n gs = gridspec.GridSpec(nrows, ncols, hspace=0.0, wspace=0.05, \n figure=fig)\n\n s = int(square)\n\n if show_Mstell 
and show_MUV:\n ax_uvlf = fig.add_subplot(gs[:4,0:2])\n ax_cmr4 = fig.add_subplot(gs[0,2:4])\n ax_cmr6 = fig.add_subplot(gs[1,2:4])\n ax_cmr8 = fig.add_subplot(gs[2,2:4])\n ax_cmr10 = fig.add_subplot(gs[3,2:4])\n \n ax_smf = fig.add_subplot(gs[s*5:,(1-s)*4:(1-s)*4+2])\n ax_cMs4 = fig.add_subplot(gs[s*5+0, (1-s)*4+2:])\n ax_cMs6 = fig.add_subplot(gs[s*5+1, (1-s)*4+2:])\n ax_cMs8 = fig.add_subplot(gs[s*5+2, (1-s)*4+2:])\n ax_cMs10 = fig.add_subplot(gs[s*5+3,(1-s)*4+2:]) \n \n ax_cMs = [ax_cMs4, ax_cMs6, ax_cMs8, ax_cMs10]\n else:\n if xp and num_uvlf_panels == 1:\n # cols, rows\n ax_extra = fig.add_subplot(gs[:,0:3])\n else:\n ax_extra = None\n \n if num_uvlf_panels == 2:\n ax_uvlf = fig.add_subplot(gs[:,0:3])\n ax_uvlf2 = fig.add_subplot(gs[:,3:6])\n ax_cmr4 = fig.add_subplot(gs[0,6:])\n ax_cmr6 = fig.add_subplot(gs[1,6:])\n ax_cmr8 = fig.add_subplot(gs[2,6:])\n ax_cmr10 = fig.add_subplot(gs[3,6:])\n else:\n ax_uvlf = fig.add_subplot(gs[:,0+4*xp:4*xp+3])\n ax_uvlf2 = None\n ax_cmr4 = fig.add_subplot(gs[0,3+4*xp:])\n ax_cmr6 = fig.add_subplot(gs[1,3+4*xp:])\n ax_cmr8 = fig.add_subplot(gs[2,3+4*xp:])\n ax_cmr10 = fig.add_subplot(gs[3,3+4*xp:])\n \n if show_Mstell and (not show_MUV):\n ax_smf = ax_uvlf\n ax_smf2 = ax_uvlf2\n ax_cMs4 = ax_cmr4\n ax_cMs6 = ax_cmr6\n ax_cMs8 = ax_cmr8\n ax_cMs10 = ax_cmr10\n ax_cMs = [ax_cMs4, ax_cMs6, ax_cMs8, ax_cMs10]\n else: \n ax_cMs = []\n ax_smf = None\n \n ax_cmd = [ax_cmr4, ax_cmr6, ax_cmr8, ax_cmr10]\n\n axes = ax_uvlf, ax_cmd, ax_smf, ax_cMs, ax_extra\n \n had_axes = False\n \n else:\n had_axes = True\n ax_uvlf, ax_cmd, ax_smf, ax_cMs, ax_extra = axes\n ax_cmr4, ax_cmr6, ax_cmr8, ax_cmr10 = ax_cmd\n \n if num_uvlf_panels == 2 and show_MUV:\n ax_uvlf2 = ax_extra\n if num_uvlf_panels == 2 and show_Mstell:\n ax_smf2 = ax_extra\n \n if show_Mstell:\n ax_cMs4, ax_cMs6, ax_cMs8, ax_cMs10 = ax_cMs\n \n if type(pop) in [list, tuple]:\n pops = pop\n else:\n pops = [pop]\n \n if zcal is not None:\n if type(zcal) != list:\n zcal = 
[zcal]\n \n l11 = read_lit('lee2011')\n b14 = read_lit('bouwens2014')\n f12 = read_lit('finkelstein2012')\n\n _colors = {4: 'k', 5: 'r', 6: 'b', 7: 'y', 8: 'c', 9: 'g', 10: 'm'}\n\n if num_uvlf_panels == 2:\n z_uvlf_flat = []\n for element in z_uvlf:\n z_uvlf_flat.extend(element)\n else:\n z_uvlf_flat = z_uvlf\n \n zall = np.sort(np.unique(np.concatenate((z_uvlf_flat, z_beta))))\n\n if cmap is not None:\n \n if (type(cmap) is str) or isinstance(cmap, ListedColormap):\n dz = 0.05\n _zall = np.arange(zall.min()-0.25, zall.max()+0.25, dz)\n znormed = (_zall - _zall[0]) / float(_zall[-1] - _zall[0])\n ch = cm.get_cmap(cmap, _zall.size)\n normz = lambda zz: (zz - _zall[0]) / float(_zall[-1] - _zall[0])\n colors = lambda z: ch(normz(z))\n self.colors = colors\n else:\n colors = cmap\n else:\n colors = lambda z: _colors[int(z)]\n\n ##\n # Plot data\n ##\n mkw = {'capthick': 1, 'elinewidth': 1, 'alpha': 1.0, 'capsize': 1}\n \n ct_lf = np.zeros(num_uvlf_panels)\n ct_b = 0\n for j, z in enumerate(zall):\n \n zstr = round(z)\n if z_only is not None:\n if zstr != z_only:\n continue\n \n zint = int(round(z, 0))\n\n if z in z_uvlf_flat:\n \n if num_uvlf_panels == 2:\n if z in z_uvlf[0]:\n _ax = ax_uvlf\n k = 0\n else:\n if show_MUV:\n _ax = ax_uvlf2\n else:\n _ax = ax_smf2\n \n k = 1\n else:\n _ax = ax_uvlf \n k = 0\n \n \n _ax_ = self.PlotLF(z, ax=_ax, color=colors(zint), mfc=colors(zint),\n mec=colors(zint), sources=sources, round_z=0.23, \n use_labels=0)\n \n if show_MUV and (not had_axes):\n if zcal is not None and z in zcal:\n bbox = dict(facecolor='none', edgecolor=colors(zint), fc='w',\n boxstyle='round,pad=0.3', alpha=1., zorder=1000)\n else:\n bbox = None \n \n _ax.text(0.95, 0.3-0.1*ct_lf[k], r'$z \\sim {}$'.format(z), \n transform=_ax.transAxes, color=colors(zint), \n ha='right', va='top', bbox=bbox, fontsize=20)\n \n #ax_uvlf.annotate(r'$z \\sim {}$'.format(z), (0.95, 0.25-0.05*ct_lf), \n # xycoords='axes fraction', color=colors[z], ha='right', va='top')\n\n\n if 
show_Mstell:\n \n if (not show_MUV):\n _ax2 = _ax\n else:\n _ax2 = ax_smf\n \n _ax_ = self.PlotSMF(z, ax=_ax2, color=colors(zint), mfc=colors(zint),\n mec=colors(zint), sources=sources, round_z=0.21, use_labels=0)\n \n if not had_axes:\n _ax2.annotate(r'$z \\sim {}$'.format(zint), (0.05, 0.3-0.1*ct_lf[k]), \n xycoords='axes fraction', color=colors(zint), \n ha='left', va='top', fontsize=20)\n\n ct_lf[k] += 1\n\n if z not in z_beta:\n continue\n\n if zint in b14.data['beta'] and show_MUV:\n err = b14.data['beta'][zint]['err'] + b14.data['beta'][zint]['sys']\n ax_cmd[j].errorbar(b14.data['beta'][zint]['M'], b14.data['beta'][zint]['beta'], \n yerr=err, \n fmt='o', color=colors(zint), label=b14.info['label'] if j == 0 else None,\n **mkw)\n \n #if z in l11.data['beta']:\n # ax_cmd[j].errorbar(l11.data['beta'][z]['M'], l11.data['beta'][z]['beta'], \n # l11.data['beta'][z]['err'], \n # fmt='*', color=colors[z], label=r'Lee+ 2011' if j == 0 else None,\n # **mkw)\n \n if not had_axes:\n \n if zcal is not None and z in zcal:\n bbox= dict(facecolor='none', edgecolor=colors(zint), fc='w',\n boxstyle='round,pad=0.3', alpha=1., zorder=1000)\n else:\n bbox = None \n \n if show_MUV:\n ax_cmd[j].text(0.05, 0.05, r'$z \\sim {}$'.format(zint), \n transform=ax_cmd[j].transAxes, color=colors(zint), \n ha='left', va='bottom', bbox=bbox, fontsize=20)\n \n #ax_cmd[j].annotate(r'$z \\sim {}$'.format(z), (0.95, 0.95), \n # ha='right', va='top', xycoords='axes fraction', color=colors[z])\n \n ct_b += 1\n \n if not show_Mstell:\n continue\n \n if z in f12.data['beta']: \n err = f12.data['beta'][zint]['err']\n ax_cMs[j].errorbar(10**f12.data['beta'][zint]['Ms'], \n f12.data['beta'][zint]['beta'], err.T[-1::-1],\n fmt='o', color=colors(zint),\n label=f12.info['label'] if j == 0 else None,\n **mkw)\n \n ##\n # Plot models\n ##\n Ms = np.arange(6, 13.25, 0.25)\n mags = np.arange(-25, -12-dmag, dmag)\n mags_cr = np.arange(-25, -10, dmag)\n hst_shallow = b14.filt_shallow\n hst_deep = 
b14.filt_deep\n calzetti = read_lit('calzetti1994').windows\n \n uvlf_by_pop = {}\n smf_by_pop = {}\n bphot_by_pop = {}\n bc94_by_pop = {}\n for h, pop in enumerate(pops):\n \n uvlf_by_pop[h] = {}\n smf_by_pop[h] = {}\n bphot_by_pop[h] = {}\n bc94_by_pop[h] = {}\n \n for j, z in enumerate(zall):\n zstr = round(z)\n \n if z_only is not None:\n if zstr != z_only:\n continue\n \n zint = int(round(z, 0))\n \n if z in z_uvlf_flat:\n \n if num_uvlf_panels == 2:\n if z in z_uvlf[0]:\n _ax = ax_uvlf\n else:\n if show_MUV:\n _ax = ax_uvlf2\n else:\n _ax = ax_smf2 \n else:\n _ax = ax_uvlf\n \n if show_MUV:\n phi = pop.LuminosityFunction(z, mags)\n uvlf_by_pop[h][z] = phi\n \n if not fill:\n _ax.semilogy(mags, phi, color=colors(zint),\n label=label if j == 0 else None, **kwargs)\n \n if show_Mstell:\n \n if (not show_MUV):\n _ax2 = _ax\n else:\n _ax2 = ax_smf\n \n phi = pop.StellarMassFunction(z, bins=Ms)\n smf_by_pop[h][z] = phi\n \n if not fill:\n _ax2.semilogy(10**Ms, phi, \n color=colors(zint), \n label=label if j == 0 else None,**kwargs) \n \n if z not in z_beta:\n continue\n \n if zstr >= 7:\n hst_filt = hst_deep\n else:\n hst_filt = hst_shallow\n \n cam = 'wfc', 'wfc3' if zstr <= 7 else 'nircam' \n filt = hst_filt[zstr] if zstr <= 7 else None\n fset = None if zstr <= 7 else 'M'\n \n #_mags = pop.Beta(z, Mbins=mags_cr, dlam=20.,\n # cam=cam, filters=filt, filter_set=fset, rest_wave=None)\n \n if beta_phot:\n beta = pop.Beta(z, Mbins=mags_cr, return_binned=True,\n cam=cam, filters=filt, filter_set=fset, rest_wave=None,\n dlam=20.)\n else:\n beta = pop.Beta(z, Mbins=mags_cr, return_binned=True,\n rest_wave=(1600., 3000.), dlam=20.)\n \n bphot_by_pop[h][z] = beta\n \n # Mask \n ok = np.logical_and(np.isfinite(beta), beta > -99999)\n if not fill:\n ax_cmd[j].plot(mags_cr[ok==1], beta[ok==1], color=colors(zint), **kwargs)\n \n if show_Mstell:\n \n _beta_c94 = pop.Beta(z, return_binned=False,\n cam='calzetti', filters=calzetti, dlam=dlam_c94, \n rest_wave=None)\n \n # 
_beta_c94 is 'raw', i.e., unbinned UV slopes for all halos.\n # Just need to bin as function of stellar mass.\n _Ms = pop.get_field(z, 'Ms')\n _nh = pop.get_field(z, 'nh')\n _x, _b, _err, _N = bin_samples(np.log10(_Ms), _beta_c94, Ms, \n weights=_nh)\n \n bc94_by_pop[h][z] = _b\n \n if not fill:\n ax_cMs[j].plot(10**_x, _b, color=colors(zint), **kwargs)\n \n ax_cMs[j].annotate(r'$z \\sim {}$'.format(zint), (0.05, 0.95), \n ha='left', va='top', xycoords='axes fraction', \n color=colors(zint), fontsize=20)\n \n if repeat_z and (j == 0) and (not fill):\n for k in range(1, 4):\n ax_cmd[k].plot(mags_cr, beta, color=colors(zint), **kwargs)\n if show_Mstell:\n ax_cMs[k].plot(10**Ms, _b, color=colors(zint), **kwargs)\n \n ##\n # Plot filled contours under certain conditions\n if fill and len(pops) == 2:\n for j, z in enumerate(zall):\n zstr = round(z)\n \n if z_only is not None:\n if zstr != z_only:\n continue\n \n if z in z_uvlf_flat:\n if num_uvlf_panels == 2:\n if z in z_uvlf[0]:\n _ax = ax_uvlf\n else:\n if show_MUV:\n _ax = ax_uvlf2\n else:\n _ax = ax_smf2\n else:\n _ax = ax_uvlf\n \n if show_MUV: \n _ax.fill_between(mags, uvlf_by_pop[0][z], \n uvlf_by_pop[1][z], color=colors(z),\n label=label if j == 0 else None, **kwargs)\n \n if show_Mstell:\n ax_smf.fill_between(10**Ms, smf_by_pop[0][z], \n smf_by_pop[1][z], color=colors(zint), **kwargs)\n \n if z not in z_beta:\n continue \n \n #ok = np.logical_and(np.isfinite(beta), beta > -99999)\n \n ax_cmd[j].fill_between(mags_cr, bphot_by_pop[0][z], \n bphot_by_pop[1][z], color=colors(zint), **kwargs)\n \n if show_Mstell: \n ax_cMs[j].fill_between(10**Ms, bc94_by_pop[0][z], \n bc94_by_pop[1][z], color=colors(zint), **kwargs)\n \n ##\n # Clean-up\n ## \n if num_uvlf_panels == 2:\n if show_MUV:\n ax_extra = ax_uvlf2\n else:\n ax_extra = ax_smf2\n \n if show_MUV: \n _axes_uvlf = [ax_uvlf] if num_uvlf_panels == 1 else [ax_uvlf, ax_uvlf2]\n else:\n _axes_uvlf = [ax_uvlf] if num_uvlf_panels == 1 else [ax_uvlf, ax_smf2]\n \n for i, 
ax in enumerate(_axes_uvlf + ax_cmd):\n \n if not show_MUV:\n break\n\n if ax is None:\n continue\n \n ax.set_xlim(-24, Mlim)\n ax.set_xticks(np.arange(-24, Mlim, 2))\n ax.set_xticks(np.arange(-24, Mlim, 1), minor=True) \n \n if i > (num_uvlf_panels - 1):\n if show_MUV:\n ax.set_ylabel(r'$\\beta_{\\mathrm{hst}}$')\n \n ax.set_yticks(np.arange(-2.8, -0.8, 0.4))\n ax.set_yticks(np.arange(-2.9, -1., 0.1), minor=True)\n ax.set_ylim(-2.9, -1.)\n \n if not show_Mstell:\n ax.yaxis.tick_right()\n ax.yaxis.set_label_position(\"right\")\n \n if i < 4 + (num_uvlf_panels - 1):\n ax.set_xticklabels([])\n else:\n if beta_phot:\n ax.set_xlabel(r'$\\langle M_{\\mathrm{UV}} \\rangle$')\n else:\n ax.set_xlabel(r'$M_{\\mathrm{UV}}$')\n \n ax.yaxis.set_ticks_position('both')\n else:\n ax.set_xlabel(r'$M_{1600}$')\n ax.set_ylim(1e-7, 1e-1)\n if i == 0:\n ax.set_ylabel(labels['galaxy_lf'])\n elif num_uvlf_panels == 2 and i == 1:\n ax.set_yticklabels([])\n \n if show_Mstell:\n ax_smf.set_xlabel(r'$M_{\\ast} / M_{\\odot}$')\n ax_smf.set_ylabel(labels['galaxy_smf'])\n ax_smf.set_xscale('log') \n ax_smf.set_ylim(1e-7, 1e-1)\n ax_smf.set_xlim(1e7, 7e11)\n \n if num_uvlf_panels == 2:\n ax_smf2.set_xlabel(r'$M_{\\ast} / M_{\\odot}$')\n ax_smf2.set_xscale('log') \n ax_smf2.set_ylim(1e-7, 1e-1)\n ax_smf2.set_xlim(1e7, 7e11)\n ax_smf2.set_yticklabels([])\n \n for i, ax in enumerate([ax_cMs4, ax_cMs6, ax_cMs8, ax_cMs10]): \n \n if ax is None:\n continue\n \n ax.set_xscale('log')\n ax.set_xlim(1e7, 7e11)\n ax.set_ylabel(r'$\\beta_{\\mathrm{c94}}$')\n ax.set_yticks(np.arange(-2.8, -0.8, 0.4))\n ax.set_yticks(np.arange(-2.9, -1., 0.1), minor=True)\n ax.set_ylim(-2.9, -1.)\n \n if not show_MUV:\n ax.yaxis.set_label_position(\"right\")\n ax.yaxis.set_ticks_position('right') \n \n if i < 3:\n ax.set_xticklabels([])\n else:\n ax.set_xlabel(r'$M_{\\ast} / M_{\\odot}$')\n \n return ax_uvlf, ax_cmd, ax_smf, ax_cMs, ax_extra\n \n def PlotColorEvolution(self, pop, zarr=None, axes=None, fig=1, \n wave_lo=None, 
wave_hi=None, show_beta_spec=True,\n show_beta_hst=True, show_beta_combo=True, show_beta_jwst=True, \n magmethod='gmean', include_Mstell=True, MUV=[-19.5], ls='-', \n return_data=True, data=None, **kwargs):\n \"\"\"\n Plot Beta(z) at fixed MUV and (optionally) Mstell.\n \"\"\"\n \n if axes is None:\n fig = pl.figure(tight_layout=False, figsize=(8, 8), num=fig)\n fig.subplots_adjust(left=0.2)\n gs = gridspec.GridSpec(2, 1+include_Mstell, hspace=0.05, \n wspace=0.05, figure=fig)\n\n axB = fig.add_subplot(gs[0,0])\n axD = fig.add_subplot(gs[1,0])\n \n if include_Mstell:\n axB2 = fig.add_subplot(gs[0,1])\n axD2 = fig.add_subplot(gs[1,1])\n else:\n axB2 = axD2 = None\n else:\n axB, axD, axB2, axD2 = axes\n\n # Plot the Bouwens data\n zbrack = [3.8, 5.0, 5.9, 7.0, 8.0]\n Beta195 = [-1.85, -1.91, -2.00, -2.05, -2.13]\n Beta195_err = [0.01, 0.02, 0.05, 0.09, 0.44]\n Beta195_sys = [0.06, 0.06, 0.08, 0.13, 0.27]\n\n dBdMUV = [-0.11, -0.14, -0.2, -0.2]\n dB_err = [0.01, 0.02, 0.04, 0.07]\n\n axB.errorbar(zbrack, Beta195, yerr=Beta195_sys, fmt='o', zorder=10,\n color='r')\n axD.errorbar(zbrack[:-1], -np.array(dBdMUV), yerr=np.array(dB_err), \n fmt='o', zorder=10, color='r')\n\n mags = np.arange(-25, -10, 0.1)\n mags_cr = np.arange(-25.5, -10, 0.5)\n \n if zarr is None:\n zarr = np.arange(4, 12., 1.)\n\n linfunc = lambda x, p0, p1: p0 * (x - 8.) + p1\n cubfunc = lambda x, p0, p1, p2: p0 * (x - 8.)**2 + p1 * (x - 8.) 
+ p2\n \n f12 = read_lit('finkelstein2012')\n calzetti = read_lit('calzetti1994').windows\n \n if wave_lo is None:\n wave_lo = np.min(calzetti)\n if wave_hi is None:\n wave_hi = np.max(calzetti)\n \n ##\n # Plot data: use same color-conventions as F12 for Mstell-beta stuff.\n ##\n if include_Mstell:\n colors = 'r', 'r', 'r'\n markers = 'v', 's', '^'\n Mstell = np.array([7.5, 8.5, 9.5])\n for z in [4,5,6,7,8]:\n for i, _Mstell in enumerate(Mstell):\n x = z\n y = f12.data['beta'][z]['beta'][i]\n yerr = np.array([f12.data['beta'][z]['err'][i]]).T[-1::-1]\n \n if axes is None:\n lab = r'$%.1f \\leq \\log_{10} (M_{\\ast} / M_{\\odot}) \\leq %.1f$' \\\n % (_Mstell-0.5, _Mstell+0.5) if z == 4 else None\n else:\n lab = None\n \n axB2.errorbar(z, y, yerr=yerr, fmt=markers[i], \n color=colors[i], alpha=1.)\n \n #if z == 4:\n # axB2.annotate(lab, (0.95, 0.95-0.05*i), ha='right', va='top',\n # color=colors[i], fontsize=12, xycoords='axes fraction')\n \n axD2.errorbar(z, f12.data['slope_wrt_mass'][z]['slope'],\n yerr=f12.data['slope_wrt_mass'][z]['err'],\n color='r', fmt='o', alpha=1.)\n \n ##\n # Continue with model predictions\n ##\n\n # For CANDELS, ERS \n b14 = read_lit('bouwens2014')\n hst_shallow = b14.filt_shallow\n hst_deep = b14.filt_deep\n\n if show_beta_jwst:\n nircam = Survey(cam='nircam')\n nircam_M = nircam._read_nircam(filter_set='M')\n nircam_W = nircam._read_nircam(filter_set='W')\n\n ##\n # Loop over models and reconstruct best-fitting Beta(z).\n ##\n Ms_b = np.arange(6.5, 11., 0.5)\n colors = 'k', 'k', 'k', 'k'\n\n if len(MUV) != len(ls):\n ls = ['-'] * len(MUV)\n \n ##\n # Won't be able to do DerivedBlob for 'nozevo' case because we only\n # saved at one redshift :( Will be crude for others. 
Could re-generate\n # samples later (parallelize, on cluster).\n ##\n _colors = {4: 'k', 5: 'r', 6: 'b', 7: 'y', 8: 'c', 9: 'g', 10: 'm'}\n mkw = {'capthick': 1, 'elinewidth': 1, 'alpha': 1.0, 'capsize': 1} \n \n pb = ProgressBar(zarr.size, name='beta(z)')\n pb.start()\n \n B195_hst = -99999 * np.ones((len(zarr), len(MUV)))\n dBdM195_hst = -99999 * np.ones((len(zarr), len(MUV)))\n B195_spec = -99999 * np.ones((len(zarr), len(MUV)))\n dBdM195_spec = -99999 * np.ones((len(zarr), len(MUV)))\n B195_jwst = -99999 * np.ones((len(zarr), len(MUV)))\n dBdM195_jwst = -99999 * np.ones((len(zarr), len(MUV)))\n B195_M = -99999 * np.ones((len(zarr), len(MUV)))\n dBdM195_M = -99999 * np.ones((len(zarr), len(MUV)))\n BMstell = -99999 * np.ones((len(zarr), len(Ms_b)))\n dBMstell = -99999 * np.ones((len(zarr), len(Ms_b)))\n for j, z in enumerate(zarr):\n \n if data is not None:\n break\n \n t1 = time.time()\n print(\"Colors at z={}...\".format(z))\n \n zstr = round(z)\n \n if zstr >= 6:\n hst_filt = hst_deep\n else:\n hst_filt = hst_shallow\n \n fset = None\n if zstr <= 8:\n cam = ('wfc', 'wfc3')\n filt = hst_filt[zstr]\n\n beta_hst = pop.Beta(z, Mbins=mags_cr, return_binned=True,\n cam=cam, filters=filt, filter_set=fset, rest_wave=None,\n magmethod=magmethod) \n #beta_hst_M1600 = pop.Beta(z, Mbins=mags_cr, return_binned=True,\n # cam=cam, filters=filt, filter_set=fset, rest_wave=None,\n # magmethod='mono')\n else:\n \n beta_hst = beta_hst_M1600 = -np.inf * np.ones_like(mags_cr)\n \n # Compute raw beta and compare to Mstell \n beta_c94 = pop.Beta(z, Mwave=1600., Mbins=mags_cr, return_binned=True,\n cam='calzetti', filters=calzetti, dlam=1., rest_wave=None,\n magmethod='mono')\n \n # Compute beta(Mstell)\n if include_Mstell:\n beta_Mst = pop.Beta(z, Mwave=1600., return_binned=False,\n cam='calzetti', filters=calzetti, dlam=1., rest_wave=None,\n Mstell=10**Ms_b, massbins=Ms_b)\n \n dbeta = pop.dBeta_dMstell(z, Mstell=10**Ms_b, \n massbins=Ms_b, dlam=1.)\n \n BMstell[j,:] = beta_Mst\n 
dBMstell[j,:] = dbeta\n \n # Compute beta given JWST W only\n # \n if show_beta_jwst:\n nircam_W_fil = what_filters(z, nircam_W, wave_lo, wave_hi)\n # Extend the wavelength range until we get two filters\n ct = 1\n while len(nircam_W_fil) < 2:\n nircam_W_fil = what_filters(z, nircam_W, wave_lo, \n wave_hi + 20 * ct)\n \n ct += 1\n \n if ct > 1: \n print(\"For JWST W filters at z={}, extend wave_hi to {}A\".format(z,\n wave_hi + 10 * (ct - 1))) \n \n filt2 = tuple(nircam_W_fil)\n \n beta_W = pop.Beta(z, Mbins=mags_cr, return_binned=True,\n cam=('nircam', ), filters=filt2, filter_set=fset, \n rest_wave=None, magmethod=magmethod)\n \n # Compute beta w/ JWST 'M' only\n nircam_M_fil = what_filters(z, nircam_M, wave_lo, wave_hi)\n \n filt3 = tuple(nircam_M_fil)\n \n if z >= 6:\n beta_M = pop.Beta(z, Mbins=mags_cr, return_binned=True,\n cam=('nircam',), filters=filt3, rest_wave=None,\n magmethod=magmethod)\n else:\n beta_M = -np.inf * np.ones_like(mags_cr) \n else:\n beta_W = beta_M = None\n \n # Compute Beta at MUV=-19.5 (or others)\n for k, beta in enumerate([beta_c94, beta_hst, beta_W, beta_M]):\n \n if (not show_beta_jwst) and k > 1:\n continue\n \n for l, mag in enumerate(MUV):\n \n _i195 = np.argmin(np.abs(mags_cr - mag))\n _B195 = beta[_i195]\n \n # Compute dBeta/dMag via finite difference.\n #_xx = mags[_i195-3:_i195+4]\n #_yy = beta[_i195-3:_i195+4]\n #\n #xx, yy = central_difference(_xx, _yy)\n #\n ## Smooth this out by just using last two points\n #slope = np.interp(-19.5, [xx[0], xx[-1]], [yy[0], yy[-1]])\n \n # Compute dBeta/dMag by fitting PL to points.\n _xx = mags_cr[_i195-1:_i195+2]\n _yy = beta[_i195-1:_i195+2]\n \n if not np.any(np.isfinite(_yy)):\n continue\n \n func = lambda xx, p0, p1: p0 + p1 * xx\n popt, pcov = curve_fit(func, _xx, _yy, \n p0=np.array([-2., 0.]))\n \n norm = popt[0]\n slope = popt[1]\n \n if k == 0:\n B195_spec[j,l] = _B195\n dBdM195_spec[j,l] = slope\n elif k == 1:\n B195_hst[j,l] = _B195\n dBdM195_hst[j,l] = slope\n elif k == 
2:\n B195_jwst[j,l] = _B195\n dBdM195_jwst[j,l] = slope\n elif k == 3:\n B195_M[j,l] = _B195\n dBdM195_M[j,l] = slope\n else:\n pass\n \n pb.update(j) \n t2 = time.time()\n \n print(t2 - t1)\n \n pb.finish() \n \n if data is not None:\n _MUV, B195_spec, B195_hst, B195_jwst, B195_M, BMstell, \\\n dBdM195_spec, dBdM195_hst, dBdM195_jwst, dBdM195_M, \\\n dBMstell = data \n \n assert np.array_equal(_MUV, MUV) \n \n ##\n # Finish up and plot.\n ## \n if show_beta_spec:\n for l, mag in enumerate(MUV):\n _beta = B195_spec[:,l]\n ok = _beta > -99999\n \n axB.plot(zarr[ok==1], _beta[ok==1], lw=1,\n label=r'$M_{\\mathrm{UV}}=%.1f$' % mag,\n color='k', ls=ls[l])\n axD.plot(zarr[ok==1], -dBdM195_spec[ok==1,l], lw=1, \n label=r'$M_{\\mathrm{UV}}=%.1f$' % mag, \n color='k', ls=ls[l])\n \n #boff = -0.2 if l == 0 else 0.2\n #axB.annotate(r'$M_{\\mathrm{UV}}=%.1f$' % mag,\n # (zarr[ok==1][-1]+0.2, _beta[ok==1][-1]+boff), \n # ha='right', va='top' if l == 0 else 'bottom')\n \n if show_beta_hst:\n for l, mag in enumerate(MUV):\n _beta = B195_hst[:,l]\n ok = _beta > -99999\n axB.plot(zarr[ok==1], _beta[ok==1], lw=2, \n color='b', ls=ls[l])\n axD.plot(zarr[ok==1], -dBdM195_hst[ok==1,l], lw=2, \n color='b', ls=ls[l])\n \n #B195_spec_2 = np.array(B195_spec_2) \n #dBdM195_spec_2 = np.array(dBdM195_spec_2)\n #ok_spec = B195_spec_2 > -99999\n #axB.plot(zarr[ok_spec==1], B195_spec_2[ok_spec==1], lw=2, \n # color='k', ls=':')\n #axD.plot(zarr[ok_spec==1], -dBdM195_spec_2[ok_spec==1], lw=2, ls=ls,\n # color='k', ls=':') \n \n if show_beta_combo:\n for l, mag in enumerate(MUV):\n _beta = B195_jwst[:,l]\n ok = np.logical_and(_beta > -99999, zarr <= 9.)\n axB.plot(zarr[ok==1], _beta[ok==1], lw=2,\n color='m', ls=ls[l])\n axD.plot(zarr[ok==1], -dBdM195_jwst[ok==1,l], lw=2,\n color='m', ls=ls[l])\n \n if show_beta_jwst:\n for l, mag in enumerate(MUV):\n _beta = B195_M[:,l]\n ok = _beta > -99999\n ok = np.logical_and(ok, zarr >= 6)\n axB.plot(zarr[ok==1], _beta[ok==1], lw=2, \n color='c', 
ls=ls[l])\n axD.plot(zarr[ok==1], -dBdM195_M[ok==1,l], lw=2,\n color='c', ls=ls[l])\n \n ##\n # Plot Mstell stuff\n ##\n if include_Mstell:\n _ls = '-', '--', ':', '-.'\n for _j, logM in enumerate([7.5, 8.5, 9.5]):\n j = np.argmin(np.abs(Ms_b - logM))\n axB2.plot(zarr, BMstell[:,j], ls=_ls[_j], color='k',\n label=r'$M_{\\ast} = 10^{%i} \\ M_{\\odot}$' % logM) \n axD2.plot(zarr, dBMstell[:,j], ls=_ls[_j], color='k',\n label=r'$M_{\\ast} = 10^{%i} \\ M_{\\odot}$' % logM)\n \n \n ##\n # Clean up\n ##\n axD.set_yticks(np.arange(0.0, 0.6, 0.2))\n axD.set_yticks(np.arange(0.0, 0.6, 0.1), minor=True)\n axD.legend(loc='upper right', frameon=True, fontsize=8,\n handlelength=2, ncol=1)\n axD.set_xlim(3.5, zarr.max()+0.5)\n axD.set_ylim(0., 0.5)\n axD.yaxis.set_ticks_position('both')\n \n axB.set_xlim(3.5, zarr.max()+0.5)\n axB.set_ylim(-3.05, -1.3)\n axB.yaxis.set_ticks_position('both')\n axB.set_yticks(np.arange(-3, -1.3, 0.25), minor=False)\n axB.set_yticks(np.arange(-3, -1.3, 0.05), minor=True)\n axB.legend(loc='upper right', frameon=True, fontsize=8,\n handlelength=2, ncol=1)\n axB.set_ylim(-2.8, -1.4) \n axB.set_xticklabels([])\n \n if include_Mstell:\n axB2.set_ylim(-3.05, -1.3)\n axB2.set_xlim(3.5, zarr.max()+0.5)\n axB2.set_yticks(np.arange(-3, -1.3, 0.25), minor=False)\n axB2.set_yticks(np.arange(-3, -1.3, 0.05), minor=True)\n axB2.legend(loc='upper right', frameon=True, fontsize=8,\n handlelength=2, ncol=1)\n \n axD2.set_xlim(3.5, zarr.max()+0.5)\n axD2.set_ylim(0., 0.5)\n axD2.set_yticks(np.arange(0.0, 0.6, 0.2))\n axD2.set_yticks(np.arange(0.0, 0.6, 0.1), minor=True) \n axD2.legend(loc='upper right', frameon=True, fontsize=8,\n handlelength=2, ncol=1)\n \n if axes is None:\n axD2.set_xlabel(r'$z$')\n #axB2.set_xlabel(r'$z$')\n axB2.set_xticklabels([])\n axB2.yaxis.tick_right()\n axD2.yaxis.tick_right()\n axB2.yaxis.set_ticks_position('both')\n axD2.yaxis.set_ticks_position('both')\n axB2.yaxis.set_label_position(\"right\")\n 
axD2.yaxis.set_label_position(\"right\")\n axB2.set_ylabel(r'$\\beta_{\\mathrm{c94}}$')\n axD2.set_ylabel(r'$d\\beta_{\\mathrm{c94}}/dlog_{10}M_{\\ast}$')\n axB2.set_ylim(-2.8, -1.4)\n \n\n \n\n if axes is None:\n axB.set_ylabel(r'$\\beta$')\n axD.set_ylabel(r'$-d\\beta/dM_{\\mathrm{UV}}$')\n axD.set_xlabel(r'$z$')\n #axB.set_xlabel(r'$z$')\n \n #for ax in [axB, axD, axB2, axD2]:\n # if ax is None:\n # continue\n # ax.yaxis.set_label_coords(-0.1-0.08*include_Mstell, 0.5)\n # ax.yaxis.set_label_coords(-0.1-0.08*include_Mstell, 0.5)\n \n if return_data:\n data = (MUV, B195_spec, B195_hst, B195_jwst, B195_M, BMstell, \n dBdM195_spec, dBdM195_hst, dBdM195_jwst, dBdM195_M, dBMstell)\n return (axB, axD, axB2, axD2), data \n else:\n data = None \n \n return axB, axD, axB2, axD2\n \n def Plot(self, z, ax=None, fig=1, sources='all', round_z=False, force_labels=False,\n AUV=None, wavelength=1600., sed_model=None, quantity='lf', use_labels=True,\n take_log=False, imf=None, mags='intrinsic', sources_except=[], **kwargs):\n \"\"\"\n Plot the luminosity function data at a given redshift.\n \n Parameters\n ----------\n z : int, float\n Redshift of interest\n wavelength : int, float\n Wavelength (in Angstroms) of LF.\n sed_model : instance\n ares.sources.SynthesisModel\n imf : str\n Stellar initial mass function. Will be used to convert stellar\n masses, if supplied. 
\n \n \"\"\"\n \n if ax is None:\n gotax = False\n fig = pl.figure(fig)\n ax = fig.add_subplot(111)\n else:\n gotax = True\n \n data = self.compile_data(z, sources, round_z=round_z, \n quantity=quantity, sources_except=sources_except)\n \n if isinstance(sources, basestring):\n if sources in groups[quantity]:\n if sources == 'all':\n srcs = []\n for src in groups[quantity]['all']:\n if src in sources_except:\n continue\n srcs.append(src)\n else: \n srcs = groups[quantity][sources]\n else:\n srcs = [sources]\n else:\n srcs = sources\n \n for source in srcs:\n if source not in data:\n continue \n \n M = data[source]['M']\n phi = data[source]['phi']\n err = data[source]['err']\n ulim = data[source]['ulim']\n \n mkw = {'capthick': 1, 'elinewidth': 1, 'alpha': 1.0, 'capsize': 1,\n 'mec':default_colors[source],\n 'fmt': default_markers[source],\n 'color':default_colors[source]}\n\n if not use_labels:\n label = None\n elif ('label' not in kwargs):\n if 'label' in data[source]:\n label = data[source]['label']\n else:\n label = source\n else:\n label = kwargs['label']\n \n mkw['label'] = label\n mkw.update(kwargs)\n \n if AUV is not None:\n dc = AUV(z, np.array(M))\n else:\n dc = 0\n \n # Shift band [optional]\n if quantity in ['lf']:\n if data[source]['wavelength'] != wavelength:\n #shift = sed_model.\n print(\"WARNING: {0!s} wavelength={1}A, not {2}A!\".format(\\\n source, data[source]['wavelength'], wavelength))\n #else:\n if source in ['stefanon2017', 'duncan2014']:\n shift = 0.25\n print(\"Shifting stellar masses by 0.25 dex (Chabrier -> Salpeter) for source={}\".format(source))\n else: \n shift = 0. 
\n \n ax.errorbar(M+shift-dc, phi, yerr=err, uplims=ulim, zorder=10, \n **mkw)\n\n if quantity == 'lf':\n ax.set_xticks(np.arange(-26, 0, 1), minor=True)\n ax.set_xlim(-26.5, -10)\n ax.set_yscale('log')\n ax.set_ylim(1e-7, 1)\n if (not gotax) or force_labels:\n ax.set_xlabel(r'$M_{\\mathrm{UV}}$')\n ax.set_ylabel(r'$\\phi(M_{\\mathrm{UV}}) \\ [\\mathrm{mag}^{-1} \\ \\mathrm{cMpc}^{-3}]$')\n elif quantity == 'smf':\n try:\n ax.set_xscale('log')\n ax.set_yscale('log')\n except ValueError:\n pass\n ax.set_xlim(1e7, 1e13)\n ax.set_ylim(1e-7, 1)\n if (not gotax) or force_labels:\n ax.set_xlabel(r'$M_{\\ast} / M_{\\odot}$') \n ax.set_ylabel(r'$\\phi(M_{\\ast}) \\ [\\mathrm{dex}^{-1} \\ \\mathrm{cMpc}^{-3}]$')\n elif quantity == 'mzr':\n ax.set_xlim(1e8, 1e12)\n ax.set_ylim(7, 9.5)\n \n if (not gotax) or force_labels:\n ax.set_xlabel(r'$M_{\\ast} / M_{\\odot}$')\n ax.set_ylabel(r'$12+\\log{\\mathrm{O/H}}$')\n\n pl.draw()\n\n return ax\n\n def MultiPlot(self, redshifts, sources='all', round_z=False, ncols=1, \n panel_size=(0.75,0.75), fig=1, xmax=-10, ymax=10, legends=None, AUV=None,\n quantity='lf', mp=None, sources_except=[], \n mp_kwargs={}, show_ylabel=True, **kwargs):\n \"\"\"\n Plot the luminosity function at a bunch of different redshifts.\n \n Parameters\n ----------\n z : list\n List of redshifts to include.\n ncols : int\n How many columns in multiplot? 
Number of rows will be determined\n automatically.\n legends : bool, str\n 'individual' means one legend per axis, 'master' means one\n (potentially gigantic) legend.\n \n \"\"\" \n \n if ncols == 1:\n nrows = len(redshifts)\n else:\n nrows = len(redshifts) // ncols\n \n if nrows * ncols != len(redshifts):\n nrows += 1\n \n dims = (nrows, ncols) \n \n # Force redshifts to be in ascending order\n if not np.all(np.diff(redshifts)) > 0: \n redshifts = np.sort(redshifts)\n \n if mp_kwargs == {}:\n mp_kwargs = {'panel_size': panel_size, 'padding': [0.2]*2}\n \n annotate_z = 'left' if quantity == 'lf' else 'right'\n \n # Create multiplot\n if mp is None:\n gotmp = False\n mp = MultiPanel(dims=dims, fig=fig, **mp_kwargs)\n else:\n gotmp = True\n assert mp.dims == dims\n \n if not hasattr(self, 'redshifts_in_mp'):\n self.redshifts_in_mp = {}\n \n if quantity not in self.redshifts_in_mp:\n self.redshifts_in_mp[quantity] = []\n \n for i, z in enumerate(redshifts):\n k = mp.elements.ravel()[i]\n ax = mp.grid[k]\n \n # Where in the MultiPlot grid are we?\n self.redshifts_in_mp[quantity].append(k)\n \n self.Plot(z, sources=sources, round_z=round_z, ax=ax, AUV=AUV,\n quantity=quantity, sources_except=sources_except, **kwargs)\n \n if annotate_z == 'left':\n _xannot = 0.05\n else:\n _xannot = 0.95\n \n if gotmp:\n continue\n \n ax.annotate(r'$z \\sim {}$'.format(round(z, 1)), (_xannot, 0.95), \n ha=annotate_z, va='top', xycoords='axes fraction')\n \n if gotmp:\n return mp\n \n for i, z in enumerate(redshifts):\n k = mp.elements.ravel()[i]\n ax = mp.grid[k]\n \n if quantity == 'lf':\n ax.set_xlim(-24, xmax)\n ax.set_ylim(1e-7, ymax)\n ax.set_yscale('log', nonposy='clip') \n ax.set_ylabel('')\n ax.set_xlabel(r'$M_{\\mathrm{UV}}$')\n else:\n ax.set_xscale('log')\n ax.set_xlim(1e6, 1e12)\n ax.set_ylim(1e-7, ymax)\n ax.set_yscale('log', nonposy='clip') \n ax.set_xlabel(r'$M_{\\ast} / M_{\\odot}$')\n \n if show_ylabel:\n if quantity == 'lf':\n 
mp.global_ylabel(r'$\\phi(M_{\\mathrm{UV}}) \\ [\\mathrm{mag}^{-1} \\ \\mathrm{cMpc}^{-3}]$')\n else:\n mp.global_ylabel(r'$\\phi(M_{\\ast}) \\ [\\mathrm{dex}^{-1} \\ \\mathrm{cMpc}^{-3}]$')\n \n \n pl.show() \n \n return mp\n \n def _selected(self, color1, color2, lbcut, ccut, degen):\n \n inter, slope = degen\n \n is_highz = np.logical_and(color1 >= lbcut, color2 <= ccut)\n \n x = color1#np.arange(lbcut, 3.5, 0.01)\n y = (x - inter) / slope\n \n is_highz = np.logical_and(color2 <= y, is_highz)\n \n return is_highz\n \n \n def PlotColorColor(self, pop, redshifts=[4,5,6,7], cuts='bouwens2015',\n fig=None, show_false_neg=True):\n \"\"\"\n Make color-color plot including high-z selection criteria.\n \"\"\"\n \n Nz = len(redshifts)\n \n if (fig is None) or (type(fig) is int):\n fig = pl.figure(tight_layout=False, \n figsize=(4*Nz, 4 * (1+show_false_neg)), num=fig)\n fig.subplots_adjust(left=0.15, bottom=0.15, top=0.9, right=0.9)\n \n gs = gridspec.GridSpec(1+show_false_neg, Nz, \n hspace=0.5, wspace=0.3, figure=fig)\n \n color_selection = read_lit(cuts).color_selection\n \n names = read_lit('bouwens2014').filter_names\n cam = ('wfc', 'wfc3')\n \n phot = {}\n axes = []\n for i, z in enumerate(redshifts):\n \n ax = fig.add_subplot(gs[0,i])\n ax2 = fig.add_subplot(gs[1,i])\n #ax3 = fig.add_subplot(gs[2,i])\n #axes.append(ax)\n #\n #ax4 = fig2.add_subplot(gs2[0,i])\n \n ax.annotate(r'$z \\sim {}$'.format(z), (0.05, 0.95), ha='left',\n va='top', xycoords='axes fraction')\n \n cuts = color_selection[z]\n \n n1A, n1B, n1gt = cuts[0]\n n2A, n2B, n2lt = cuts[1]\n inter, slope = cuts[2]\n \n # color1 = ph_mags[ph_fil.index(n1A)] - ph_mags[ph_fil.index(n1B)]\n # color2 = ph_mags[ph_fil.index(n2A)] - ph_mags[ph_fil.index(n2B)]\n \n # Left rectangle: constraint on color2 (y axis)\n ax.fill_betweenx([n2lt, 3.5], -1, 3.5, color='k', alpha=0.2,\n edgecolors='none')\n # Bottom rectangle: constraint on color1 (x axis)\n ax.fill_between([-1,n1gt], -1, n2lt, color='k', alpha=0.2,\n 
edgecolors='none')\n \n #y = np.arange(-1, n2lt+0.05, 0.05)\n #x = inter + y * slope\n x = np.arange(n1gt, 3.5, 0.01)\n y = (x - inter) / slope\n \n ok = y <= n2lt\n \n ax.fill_between(x[ok==1], y[ok==1], np.ones_like(y[ok==1]) * n2lt, \n color='k', alpha=0.2)\n \n ax.set_xlabel(r'{}-{}'.format(names[n1A], names[n1B]))\n ax.set_ylabel(r'{}-{}'.format(names[n2A], names[n2B]))\n \n hist = pop.histories\n \n dL = pop.cosm.LuminosityDistance(z)\n magcorr = 5. * (np.log10(dL / cm_per_pc) - 1.)\n \n ph_mags = []\n ph_xph = []\n ph_dx = []\n ph_fil = []\n for j, _cam in enumerate(cam):\n \n _filters, xphot, dxphot, ycorr = \\\n pop.synth.Photometry(zobs=z, sfh=hist['SFR'], zarr=hist['z'],\n hist=hist, dlam=20., cam=_cam, filters=list(names.keys()),\n extras=pop.extras, rest_wave=None, load=False)\n \n ph_mags.extend(list(np.array(ycorr) - magcorr))\n ph_xph.extend(xphot)\n ph_dx.extend(list(np.sum(dxphot, axis=1).squeeze()))\n ph_fil.extend(_filters)\n \n ph_mags = np.array(ph_mags)\n \n phot[z] = ph_mags\n \n _color1 = ph_mags[ph_fil.index(n1A)] - ph_mags[ph_fil.index(n1B)]\n _color2 = ph_mags[ph_fil.index(n2A)] - ph_mags[ph_fil.index(n2B)]\n \n is_highz = self._selected(_color1, _color2, n1gt, n2lt, \n (inter, slope))\n \n false_neg = (is_highz.size - is_highz.sum()) / float(is_highz.size)\n print('False negatives at z={}: {}'.format(z, false_neg))\n \n #for _ax in axes:\n ax.scatter(_color1[is_highz==1], _color2[is_highz==1], color='b',\n facecolors='b', edgecolors='none', alpha=0.01)\n ax.scatter(_color1[is_highz==0], _color2[is_highz==0], color='r',\n facecolors='r', edgecolors='none', alpha=0.01) \n \n ax.set_xlim(-0.5, 3.5)\n ax.set_ylim(-0.5, 2)\n \n if not show_false_neg:\n continue\n \n # Plot false negative rate vs. 
SFR\n sfr = pop.get_field(z, 'SFR')\n Mh = pop.get_field(z, 'Mh')\n sfr_bins = np.arange(-3, 3, 0.2)\n \n # y is irrelevant here\n x1, y1, std1, N1 = bin_samples(np.log10(sfr[is_highz==1]), \n Mh[is_highz==1],\n sfr_bins, return_N=True, inclusive=True)\n x2, y2, std2, N2 = bin_samples(np.log10(sfr[is_highz==0]), \n Mh[is_highz==0],\n sfr_bins, return_N=True, inclusive=True) \n \n tot_by_bin = N1 + N2\n \n ax2.semilogx(10**x1, N2 / tot_by_bin.astype('float'), color='k')\n ax2.set_ylim(-0.05, 1.05)\n ax2.set_xlabel(r'$\\dot{M}_{\\ast} \\ [M_{\\odot} \\ \\mathrm{yr}^{-1}]$')\n \n if i == 0:\n ax2.set_ylabel('false negative rate') \n \n return fig, gs \n \n def PlotScalingRelations(self, include=['SMHM', 'MZR', 'MS'], ncols=None):\n \"\"\"\n \n \"\"\"\n pass\n \n def PlotTrajectories(self):\n pass\n \n def annotated_legend(self, ax, loc=(0.95, 0.05), sources='all'): \n \"\"\"\n Annotate sources properly color-coded.\n \"\"\" \n if sources in groups[quantity]:\n srcs = groups[quantity][sources]\n elif isinstance(sources, basestring):\n srcs = [sources]\n \n for i, source in enumerate(srcs):\n coord = (loc[0], loc[1] + 0.05 * i) \n ax.annotate(source, coord, fontsize=14, \n color=default_colors[source], ha='right', va='bottom',\n xycoords='axes fraction')\n \n pl.draw()\n \n return ax\n\n def add_master_legend(self, mp, **kwargs):\n return add_master_legend(mp, **kwargs)\n \n def PlotSummary(self, pop, axes=None, fig=1, use_best=True, method='mode',\n fresh=False, redshifts=None, **kwargs):\n \"\"\"\n Make a huge plot.\n \"\"\"\n \n if axes is None:\n gotax = False\n axes = self._MegaPlotSetup(fig)\n else:\n gotax = True\n\n if not gotax:\n self._MegaPlotCalData(axes, redshifts=redshifts)\n self._MegaPlotPredData(axes, redshifts=redshifts)\n self._MegaPlotGuideEye(axes, redshifts=redshifts)\n\n if pop is None:\n pass\n elif isinstance(pop, GalaxyEnsemble):\n self._MegaPlotPop(axes, pop, redshifts=redshifts)\n elif hasattr(pop, 'chain'):\n if fresh:\n bkw = 
pop.base_kwargs.copy()\n bkw.update(pop.max_likelihood_parameters(method=method))\n pop = GP(**bkw)\n self._MegaPlotPop(axes, pop, redshifts=redshifts)\n else:\n self._MegaPlotChain(axes, pop, use_best=use_best, **kwargs)\n else:\n raise NotImplemented(\"Unrecognized object pop={}\".format(pop))\n \n self._MegaPlotCleanup(axes)\n \n return axes\n \n def _MegaPlotPop(self, kw, pop, redshifts=None, **kwargs):\n \n \n ax_sfe = kw['ax_sfe']\n ax_fco = kw['ax_fco']\n ax_rdu = kw['ax_rdu']\n ax_phi = kw['ax_phi']\n ax_bet = kw['ax_bet'] \n \n ax_smf = kw['ax_smf']\n #ax_smhm = kw['ax_smhm']\n ax_MsMUV = kw['ax_MsMUV']\n ax_AUV = kw['ax_AUV']\n ax_sfrd = kw['ax_sfrd']\n #ax_lae_z = kw['ax_lae_z']\n #ax_lae_m = kw['ax_lae_m']\n ax_sfms = kw['ax_sfms']\n \n _mst = np.arange(6, 14, 0.2)\n _mh = np.logspace(6, 13, 100)\n _mags = np.arange(-25, -10, pop.pf['pop_mag_bin'])\n \n if redshifts is None:\n redshifts = [4, 6, 8, 10]\n \n colors = ['k', 'b', 'c', 'm', 'g', 'y', 'r']\n \n dc1 = DustCorrection(dustcorr_method='meurer1999',\n dustcorr_beta='bouwens2014')\n \n xa_b = []\n xa_f = []\n for j, z in enumerate(redshifts):\n \n # UVLF\n phi = pop.LuminosityFunction(z, _mags)\n ax_phi.semilogy(_mags, phi, color=colors[j], drawstyle='steps-mid')\n \n # Binned version\n if z <= 7:\n Mbins = np.arange(-25, -10, 1.0)\n if pop.pf['pop_dust_yield'] is not None:\n _beta = pop.Beta(z, Mwave=1600., return_binned=True,\n Mbins=Mbins, presets='hst', rest_wave=None, dlam=20.)\n else:\n _beta = np.zeros_like(Mbins) \n \n ax_bet.plot(Mbins, _beta, color=colors[j]) \n \n Mh = pop.get_field(z, 'Mh')\n Ms = pop.get_field(z, 'Ms')\n SFR = pop.get_field(z, 'SFR')\n \n SFE = pop.guide.SFE(z=z, Mh=_mh)\n \n ax_sfe.loglog(_mh, SFE, color=colors[j], alpha=0.8,\n label=r'$z={}$'.format(z))\n \n if (pop.pf['pop_scatter_mar'] > 0) or (pop.pf['pop_histories'] is not None):\n _bins = np.arange(7, 12.1, 0.1)\n x, y, std, N = bin_samples(np.log10(Ms), np.log10(SFR), _bins)\n ax_sfms.loglog(10**x, 10**y, 
color=colors[j])\n else: \n ax_sfms.loglog(Ms, SFR, color=colors[j])\n \n # SMF\n phi = pop.StellarMassFunction(z, _mst)\n ax_smf.loglog(10**_mst, phi, color=colors[j], drawstyle='steps-mid')\n\n # SMHM\n _Mh = 10**np.arange(8, 12.5, 0.1)\n fstar = pop.SMHM(z, _Mh, return_mean_only=True)\n #ax_smhm.loglog(_Mh, 10**fstar, color=colors[j])\n \n mags1500 = pop.Magnitude(z, wave=1500.)\n \n \n #mags = pop.Magnitude(z, wave=1600.)\n #if pop.pf['pop_dust_yield'] is not None:\n # beta = pop.Beta(z, Mwave=1600., return_binned=False)\n #else:\n # beta = np.zeros_like(mags)\n \n # M1500-Mstell\n _x, _y, _z, _N = bin_samples(mags1500, np.log10(Ms), Mbins)\n ax_MsMUV.plot(_x, _y, color=colors[j]) \n \n # Beta just to get 'mags'\n if pop.pf['pop_dust_yield'] in [0, None]:\n xa_f.append(0)\n xa_b.append(0)\n \n if pop.pf['dustcorr_method'] is not None:\n print(\"dustcorr_method={}\".format(pop.pf['dustcorr_method'])) \n ax_bet.plot(Mbins, dc1.Beta(z, Mbins), color=colors[j])\n \n continue\n \n \n Rdust = pop.guide.dust_scale(z=z, Mh=Mh)\n ydust = pop.guide.dust_yield(z=z, Mh=Mh)\n \n if pop.pf['pop_fduty'] is not None:\n fduty = pop.guide.fduty(z=z, Mh=Mh)\n else:\n fduty = np.zeros_like(Mh)\n \n #any_fcov = np.any(np.diff(fcov, axis=1) != 0)\n #any_fduty = np.any(np.diff(fduty, axis=1) != 0)\n \n if type(pop.pf['pop_dust_yield']) is str:\n ax_fco.semilogx(Mh, ydust, color=colors[j])\n ax_fco.set_ylabel(r'$y_{\\mathrm{dust}}$')\n elif type(pop.pf['pop_fduty']) is str:\n ax_fco.semilogx(Mh, fduty, color=colors[j])\n ax_fco.set_ylabel(r'$f_{\\mathrm{duty}}$')\n \n ax_rdu.loglog(Mh, Rdust, color=colors[j])\n\n Mbins = np.arange(-25, -10, 1.)\n AUV = pop.AUV(z, Mwave=1600., return_binned=True,\n magbins=Mbins)\n \n ax_AUV.plot(Mbins, AUV, color=colors[j])\n \n # LAE stuff\n #_x, _y, _z, _N = bin_samples(mags, fcov, Mbins)\n #ax_lae_m.plot(_x, 1. - _y, color=colors[j])\n #\n #faint = np.logical_and(Mbins >= -20.25, Mbins < -18.)\n #bright = Mbins < -20.25\n #\n #xa_f.append(1. 
- np.mean(_y[faint==1])) \n #xa_b.append(1. - np.mean(_y[bright==1]))\n \n #ax_lae_z.plot(redshifts, xa_b, color='k', alpha=1.0, ls='-')\n #ax_lae_z.plot(redshifts, xa_f, color='k', alpha=1.0, ls='--')\n \n zarr = np.arange(4, 25, 0.1)\n sfrd = np.array([pop.SFRD(zarr[i]) for i in range(zarr.size)])\n ax_sfrd.semilogy(zarr, sfrd * rhodot_cgs, color='k')\n \n def _MegaPlotChain(self, kw, anl, **kwargs):\n \"\"\"\n Plot many samples\n \"\"\"\n \n ax_sfe = kw['ax_sfe']\n ax_fco = kw['ax_fco']\n ax_rdu = kw['ax_rdu']\n ax_phi = kw['ax_phi']\n ax_bet = kw['ax_bet']\n \n \n ax_smf = kw['ax_smf']\n #ax_smhm = kw['ax_smhm']\n ax_MsMUV = kw['ax_MsMUV']\n ax_AUV = kw['ax_AUV']\n ax_sfrd = kw['ax_sfrd']\n #ax_lae_z = kw['ax_lae_z']\n #ax_lae_m = kw['ax_lae_m']\n ax_sfms = kw['ax_sfms']\n \n _mst = np.arange(6, 12, 0.2)\n _mags = np.arange(-25, -10, anl.base_kwargs['pop_mag_bin'])\n\n redshifts = [4, 6, 8, 10]\n colors = ['k', 'b', 'c', 'm', 'g', 'y', 'r']\n\n dc1 = DustCorrection(dustcorr_method='meurer1999',\n dustcorr_beta='bouwens2014')\n \n xa_b = []\n xa_f = []\n for j, z in enumerate(redshifts):\n \n # UVLF\n anl.ReconstructedFunction('galaxy_lf', ivar=[z, None], ax=ax_phi,\n color=colors[j], **kwargs)\n \n anl.ReconstructedFunction('fstar', ivar=[z, None], ax=ax_sfe,\n color=colors[j], **kwargs) \n \n if 'galaxy_smf' in anl.all_blob_names:\n anl.ReconstructedFunction('galaxy_smf', ivar=[z, None], ax=ax_smf,\n color=colors[j], is_logx=True, **kwargs)\n \n #if 'MUV_gm' in anl.all_blob_namess:\n # _z, _MUV = anl.get_ivars('MUV_gm')\n # k = np.argmin(np.abs(z - _z))\n # new_x = anl.ExtractData('MUV_gm')['MUV_gm'][:,k,:]\n # print(\"New magnitudes!!!!\")\n #else:\n new_x = None\n \n anl.ReconstructedFunction('sfrd', ivar=None, ax=ax_sfrd,\n color=colors[j], multiplier=rhodot_cgs, **kwargs)\n \n if 'pop_dust_yield' not in anl.base_kwargs:\n continue\n \n dtmr = anl.base_kwargs['pop_dust_yield']\n if (dtmr is None) or (dtmr == 0):\n continue\n \n 
anl.ReconstructedFunction('beta_hst', ivar=[z, None], ax=ax_bet,\n color=colors[j], new_x=new_x, **kwargs)\n \n anl.ReconstructedFunction('AUV', ivar=[z, None], ax=ax_AUV,\n color=colors[j], **kwargs)\n \n anl.ReconstructedFunction('dust_scale', ivar=[z, None], ax=ax_rdu,\n color=colors[j], **kwargs)\n \n if 'fduty' in anl.all_blob_names:\n anl.ReconstructedFunction('fduty', ivar=[z, None], ax=ax_fco,\n color=colors[j], **kwargs)\n \n \n def _MegaPlotLimitsAndTicks(self, kw):\n ax_sfe = kw['ax_sfe']\n ax_fco = kw['ax_fco']\n ax_rdu = kw['ax_rdu']\n ax_phi = kw['ax_phi']\n ax_bet = kw['ax_bet']\n\n ax_smf = kw['ax_smf']\n #ax_smhm = kw['ax_smhm']\n ax_MsMUV = kw['ax_MsMUV']\n ax_AUV = kw['ax_AUV']\n ax_sfrd = kw['ax_sfrd']\n #ax_lae_z = kw['ax_lae_z']\n #ax_lae_m = kw['ax_lae_m']\n ax_sfms = kw['ax_sfms']\n \n \n \n ax_sfe.set_xlim(1e8, 1e13)\n ax_sfe.set_ylim(1e-3, 1.0)\n ax_fco.set_xscale('log')\n ax_fco.set_xlim(1e8, 1e13)\n ax_fco.set_yscale('linear')\n ax_fco.set_ylim(0, 1.05)\n ax_rdu.set_xlim(1e8, 1e13)\n ax_rdu.set_ylim(1e-2, 100)\n \n ax_smf.set_xscale('log')\n ax_smf.set_xlim(1e7, 1e12)\n ax_smf.set_ylim(1e-7, 2e-1)\n #ax_smhm.set_xscale('log')\n #ax_smhm.set_yscale('log')\n #ax_smhm.set_ylim(-4, 1.)\n #ax_smhm.set_yscale('log', nonposy='clip')\n #ax_smhm.set_xlim(1e9, 1e12)\n #ax_smhm.set_ylim(5e-4, 1.5e-1)\n ax_bet.set_xlim(-25, -12)\n ax_bet.set_ylim(-3, -1)\n ax_phi.set_xlim(-25, -12)\n ax_phi.set_ylim(1e-7, 2e-1)\n \n ax_MsMUV.set_yscale('linear')\n ax_MsMUV.set_ylim(7, 12)\n ax_MsMUV.set_xlim(-25, -12)\n \n ax_AUV.set_xlim(-25, -12)\n ax_AUV.set_ylim(0, 3.5)\n\n ax_sfms.set_xlim(1e7, 1e12)\n ax_sfms.set_ylim(1e-2, 2e3)\n \n #ax_lae_m.set_xlim(-25, -12)\n #ax_lae_z.set_xlim(3., 7.2)\n #ax_lae_m.set_ylim(-0.05, 1.05)\n #ax_lae_z.set_ylim(-0.05, 1.05)\n\n ax_sfrd.set_yscale('log')\n ax_sfrd.set_xlim(4, 20)\n ax_sfrd.set_ylim(1e-4, 1e-1)\n\n # Set ticks for all MUV scales\n for ax in [ax_bet, ax_phi, ax_MsMUV, ax_AUV]:\n ax.set_xticks(np.arange(-24, -12, 
1), minor=True)\n \n for ax in [ax_MsMUV, ax_AUV]:\n ax.set_xlim(-25, -15) \n \n return kw\n \n def _MegaPlotSetup(self, fig):\n \n fig = pl.figure(tight_layout=False, figsize=(22, 7), num=fig)\n #gs = gridspec.GridSpec(3, 10, hspace=0.3, wspace=1.0)\n gs = gridspec.GridSpec(3, 14, hspace=0.3, wspace=5.0)\n \n # Inputs\n ax_sfe = fig.add_subplot(gs[0,0:3])\n ax_rdu = fig.add_subplot(gs[1,0:3])\n ax_fco = fig.add_subplot(gs[2,0:3])\n \n # Rest UV stuff / calibration\n ax_phi = fig.add_subplot(gs[0:2,3:7])\n ax_bet = fig.add_subplot(gs[2,3:7])\n \n # Predictions\n ax_smf = fig.add_subplot(gs[0:2,7:11])\n ax_sfms = fig.add_subplot(gs[2,7:11])\n \n #ax_smhm = fig.add_subplot(gs[2,12:]) \n ax_AUV = fig.add_subplot(gs[0,11:14])\n ax_MsMUV = fig.add_subplot(gs[1,11:14])\n ax_sfrd = fig.add_subplot(gs[2,11:14])\n \n # Cal\n \n \n\n # Placeholder\n #ax_tau = fig.add_subplot(gs[0:1,9])\n \n kw = \\\n {\n 'ax_sfe': ax_sfe,\n 'ax_fco': ax_fco, \n 'ax_rdu': ax_rdu,\n 'ax_phi': ax_phi,\n 'ax_bet': ax_bet,\n 'ax_smf': ax_smf,\n 'ax_MsMUV': ax_MsMUV,\n 'ax_AUV': ax_AUV, \n 'ax_sfrd': ax_sfrd,\n 'ax_sfms': ax_sfms,\n }\n \n return kw\n \n def _MegaPlotCalData(self, kw, redshifts=None):\n \n ax_sfe = kw['ax_sfe']\n ax_fco = kw['ax_fco']\n ax_rdu = kw['ax_rdu']\n ax_phi = kw['ax_phi']\n ax_bet = kw['ax_bet']\n \n \n ax_smf = kw['ax_smf']\n #ax_smhm = kw['ax_smhm']\n ax_MsMUV = kw['ax_MsMUV']\n ax_AUV = kw['ax_AUV']\n ax_sfrd = kw['ax_sfrd']\n #ax_lae_z = kw['ax_lae_z']\n #ax_lae_m = kw['ax_lae_m']\n ax_sfms = kw['ax_sfms']\n \n \n l11 = read_lit('lee2011')\n b14 = read_lit('bouwens2014')\n \n # Vanilla dust model\n dc1 = DustCorrection(dustcorr_method='meurer1999',\n dustcorr_beta='bouwens2014')\n #devol = ares.util.ParameterBundle('dust:evolving')\n #dc2 = ares.phenom.DustCorrection(**devol)\n #dc3 = DustCorrection(dustcorr_method='pettini1998',\n # dustcorr_beta='bouwens2014')\n \n\n # Redshifts and color scheme\n if redshifts is None:\n redshifts = [4, 6, 8, 10]\n \n colors = 
['k', 'b', 'c', 'm', 'g', 'y', 'r']\n mkw = {'capthick': 1, 'elinewidth': 1, 'alpha': 1.0, 'capsize': 1}\n\n # UVLF and Beta\n for j, z in enumerate(redshifts):\n self.PlotLF(z, ax=ax_phi, sources=['bouwens2015'],\n round_z=0.21, color=colors[j], mec=colors[j], mfc=colors[j], fmt='o',\n label='Bouwens+ 2015' if j == 0 else None, **mkw)\n self.PlotLF(z, ax=ax_phi, sources=['oesch2018'],\n round_z=0.21, color=colors[j], mec=colors[j], mfc=colors[j], fmt='d',\n label='Oesch+ 2018' if j == 0 else None, **mkw) \n self.PlotLF(z, ax=ax_phi, sources=['finkelstein2015'],\n round_z=0.21, color=colors[j], mec=colors[j], mfc='none', mew=1, fmt='s',\n label='Finkelstein+ 2015' if j == 0 else None, **mkw) \n self.PlotSMF(z, ax=ax_smf, sources=['song2016'],\n round_z=0.11, color=colors[j], mec=colors[j], mfc=colors[j], mew=1, fmt='o',\n label='Song+ 2016' if j == 0 else None, **mkw) \n self.PlotSMF(z, ax=ax_smf, sources=['stefanon2017'], mew=1, fmt='s',\n round_z=0.11, color=colors[j], mec=colors[j], mfc='none',\n label='Stefanon+ 2017' if j == 0 else None, **mkw)\n\n if z in b14.data['beta']:\n \n err = b14.data['beta'][z]['err'] + b14.data['beta'][z]['sys']\n ax_bet.errorbar(b14.data['beta'][z]['M'], b14.data['beta'][z]['beta'], err, \n fmt='o', color=colors[j], label=r'Bouwens+ 2014' if j == 0 else None,\n **mkw)\n \n if z in l11.data['beta']:\n ax_bet.errorbar(l11.data['beta'][z]['M'], l11.data['beta'][z]['beta'], \n l11.data['beta'][z]['err'], \n fmt='*', color=colors[j], label=r'Lee+ 2011' if j == 0 else None,\n **mkw)\n \n # Plot vanilla dust correction\n ax_AUV.plot(np.arange(-25, -15, 0.1), \n dc1.AUV(z, np.arange(-25, -15, 0.1)), \n color=colors[j], ls=':', \n label=r'M99+B14 IRX-$\\beta + M_{\\mathrm{UV}}-\\beta$' if j == 0 else None) \n \n \n #ax_AUV.plot(np.arange(-25, -14, 2.), dc2.AUV(z, np.arange(-25, -14, 2.)), \n # color=colors[j], ls='--', \n # label=r'evolving IRX-$\\beta + M_{\\mathrm{UV}}-\\beta$' if j == 0 else None) \n #ax_AUV.plot(np.arange(-25, -14, 2.), 
dc3.AUV(z, np.arange(-25, -14, 2.)), \n # color=colors[j], ls='-.', \n # label=r'P98+B14 IRX-$\\beta + M_{\\mathrm{UV}}-\\beta$' if j == 0 else None) \n \n def _MegaPlotGuideEye(self, kw, redshifts=None):\n ax_sfe = kw['ax_sfe']\n ax_fco = kw['ax_fco']\n ax_rdu = kw['ax_rdu']\n ax_phi = kw['ax_phi']\n ax_bet = kw['ax_bet']\n \n \n ax_smf = kw['ax_smf']\n #ax_smhm = kw['ax_smhm']\n ax_MsMUV = kw['ax_MsMUV']\n ax_AUV = kw['ax_AUV']\n ax_sfrd = kw['ax_sfrd']\n #ax_lae_z = kw['ax_lae_z']\n #ax_lae_m = kw['ax_lae_m']\n ax_sfms = kw['ax_sfms']\n \n ax_rdu.annotate(r'$R_h \\propto M_h^{1/3} (1+z)^{-1}$', (1.5e8, 30))\n \n if redshifts is None:\n redshifts = [4, 6, 8, 10]\n \n colors = ['k', 'b', 'c', 'm', 'g', 'y', 'r']\n \n # Show different Mh slopes \n mh = np.logspace(8, 9, 50)\n \n # How Rdust would scale if it were proportional with halo size\n for j, z in enumerate(redshifts):\n ax_rdu.loglog(mh, 5. * (mh / 1e8)**0.333 * (1. + 4.) / (1. + z), color=colors[j], \n lw=1, ls='-', alpha=0.5)\n \n \n func = lambda z, A: 5e-2 * (mh / 1e8)**A #* (1. + 4.) / (1. + zz)**B\n ax_sfe.loglog(mh, func(4., 1./3.), \n color='k', lw=1, ls='-', alpha=0.5) \n ax_sfe.loglog(mh, func(4., 2./3.), \n color='k', lw=1, ls='-', alpha=0.5)\n ax_sfe.loglog(mh, func(4., 3./3.), \n color='k', lw=1, ls='-', alpha=0.5) \n ax_sfe.annotate(r'$1/3$', (mh[-1]*1.1, func(4., 1./3.)[-1]), ha='left') \n ax_sfe.annotate(r'$2/3$', (mh[-1]*1.1, func(4., 2./3.)[-1]), ha='left') \n ax_sfe.annotate(r'$1$', (mh[-1]*1.1, func(4., 3./3.)[-1]), ha='left')\n \n # Show different z-dep\n ax_sfe.scatter(np.ones_like(redshifts) * 1e10, 4e-3 * ((1. + np.array(redshifts)) / 9.),\n color=colors, facecolors='none', marker='s', s=5) \n ax_sfe.scatter(np.ones_like(redshifts) * 1e11, 4e-3 * np.sqrt(((1. 
+ np.array(redshifts)) / 9.)),\n color=colors, facecolors='none', marker='s', s=5) \n ax_sfe.annotate(r'$(1+z)$', (1e10, 5e-3), ha='center', va='bottom', \n rotation=0, fontsize=8)\n ax_sfe.annotate(r'$\\sqrt{1+z}$', (1e11, 5e-3), ha='center', va='bottom', \n rotation=0, fontsize=8)\n \n\n ax_phi.legend(loc='lower right', fontsize=8)\n ax_smf.legend(loc='lower left', fontsize=8)\n ax_bet.legend(loc='upper right', fontsize=8)\n ax_AUV.legend(loc='upper right', fontsize=8)\n\n\n # Show different z-dep\n ax_sfms.scatter(np.ones_like(redshifts) * 2e9, 1e-1 * ((1. + np.array(redshifts)) / 9.)**1.5,\n color=colors, facecolors='none', marker='s', s=5) \n ax_sfms.annotate(r'$(1+z)^{3/2}$', (2e9, 1.5e-1), ha='center', va='bottom', \n rotation=0, fontsize=8)\n ax_sfms.scatter(np.ones_like(redshifts) * 2e10, 1e-1 * ((1. + np.array(redshifts)) / 9.)**2.5,\n color=colors, facecolors='none', marker='s', s=5) \n ax_sfms.annotate(r'$(1+z)^{5/2}$', (2e10, 1.5e-1), ha='center', va='bottom', \n rotation=0, fontsize=8)\n\n mh = np.logspace(7., 8, 50)\n ax_sfms.loglog(mh, 200 * func(4., 3./3.), \n color=colors[0], lw=1, ls='-', alpha=0.5) \n ax_sfms.annotate(r'$1$', (mh[-1]*1.1, 200 * func(4., 3./3.)[-1]), ha='left')\n \n def _MegaPlotPredData(self, kw, redshifts=None):\n \n ax_sfe = kw['ax_sfe']\n ax_fco = kw['ax_fco']\n ax_rdu = kw['ax_rdu']\n ax_phi = kw['ax_phi']\n ax_bet = kw['ax_bet']\n \n ax_smf = kw['ax_smf']\n #ax_smhm = kw['ax_smhm']\n ax_MsMUV = kw['ax_MsMUV']\n ax_AUV = kw['ax_AUV']\n ax_sfrd = kw['ax_sfrd']\n #ax_lae_z = kw['ax_lae_z']\n #ax_lae_m = kw['ax_lae_m']\n ax_sfms = kw['ax_sfms']\n \n mkw = {'capthick': 1, 'elinewidth': 1, 'alpha': 1.0, 'capsize': 1}\n \n if redshifts is None:\n redshifts = [4, 6, 8, 10]\n \n colors = ['k', 'b', 'c', 'm', 'g', 'y', 'r']\n \n xarr = np.arange(-22, -18, 0.5)\n yarr = [0.1, 0.08, 0.08, 0.1, 0.18, 0.3, 0.47, 0.6]\n yerr = [0.1, 0.05, 0.03, 0.05, 0.05, 0.1, 0.15, 0.2]\n #ax_lae_m.errorbar(xarr, yarr, yerr=yerr, color='k', \n # 
label='Stark+ 2010 (3 < z < 6.2)', fmt='o', **mkw)\n\n zlist = [4., 5, 6.1]\n x25_b = [0.13, 0.25, 0.2]\n x25_f = [0.35, 0.48, 0.55]\n err_b = [0.05, 0.05, 0.08]\n err_f = [0.05, 0.1, 0.15]\n \n #_colors = 'k', 'g', 'b'\n #for j, z in enumerate(zlist):\n # ax_lae_z.errorbar(zlist[j], x25_b[j], yerr=err_b[j], \n # color=_colors[j], ms=5, \n # label=r'Stark+ 2011' if j == 0 else None,\n # fmt='s', mfc='none', **mkw)\n # ax_lae_z.errorbar(zlist[j], x25_f[j], yerr=err_f[j],\n # color=_colors[j], ms=5,\n # fmt='o', mfc='none', **mkw)\n\n \n ## De Barros et al. (2017) \n #ax_lae_z.errorbar(5.9, 0.1, 0.05, color='b', fmt='*', mfc='none', ms=5,\n # label=r'deBarros+ 2017', **mkw)\n #ax_lae_z.errorbar(5.9, 0.38, 0.12, color='b', fmt='*', mfc='none', ms=5,\n # **mkw)\n #\n #ax_lae_z.legend(loc='upper left', frameon=True, fontsize=6)\n #ax_lae_m.legend(loc='upper left', frameon=True, fontsize=6)\n\n # Salmon et al. 2015\n data = \\\n {\n 4: {'MUV': np.arange(-21.5, -18, 0.5),\n 'Ms': [9.61, 9.5, 9.21, 9.13, 8.96, 8.81, 8.75],\n 'err': [0.39, 0.57, 0.47, 0.51, 0.56, 0.53, 0.57]},\n 5: {},\n 6: {'MUV': np.arange(-21.5, -18.5, 0.5),\n 'Ms': [9.34, 9.23, 9.21, 9.14, 8.90, 8.77],\n 'err': [0.44, 0.38, 0.41, 0.38, 0.38, 0.47]},\n }\n \n for j, z in enumerate(redshifts):\n if z not in data:\n continue\n \n if ('MUV' not in data[z]) or ('Ms' not in data[z]):\n continue\n \n ax_MsMUV.errorbar(data[z]['MUV'], data[z]['Ms'], yerr=data[z]['err'],\n color=colors[j], label='Salmon+ 2015' if j==0 else None, \n fmt='o', mfc='none', **mkw)\n\n ax_MsMUV.legend(loc='upper right', fontsize=8)\n \n def _MegaPlotCleanup(self, kw):\n \n \n ax_sfe = kw['ax_sfe']\n ax_fco = kw['ax_fco']\n ax_rdu = kw['ax_rdu']\n ax_phi = kw['ax_phi']\n ax_bet = kw['ax_bet']\n \n \n ax_smf = kw['ax_smf']\n #ax_smhm = kw['ax_smhm']\n ax_MsMUV = kw['ax_MsMUV']\n ax_AUV = kw['ax_AUV']\n ax_sfrd = kw['ax_sfrd']\n #ax_lae_z = kw['ax_lae_z']\n #ax_lae_m = kw['ax_lae_m']\n ax_sfms = kw['ax_sfms']\n \n ax_sfe.set_title('Model 
Inputs', fontsize=18)\n ax_sfe.set_ylabel(r'$f_{\\ast} \\equiv \\dot{M}_{\\ast} / f_b \\dot{M}_h$')\n \n ax_fco.set_ylabel(r'$f_{\\mathrm{duty}}$')\n ax_fco.set_xlabel(r'$M_h / M_{\\odot}$')\n \n ax_rdu.set_ylabel(r'$R_{\\mathrm{dust}} \\ [\\mathrm{kpc}]$')\n \n ax_AUV.set_title('Predictions', fontsize=18)\n ax_smf.set_title('Predictions', fontsize=18)\n\n ax_smf.set_ylabel(labels['galaxy_smf'])\n #ax_smhm.set_xlabel(r'$M_h / M_{\\odot}$')\n #ax_smhm.set_ylabel(r'$M_{\\ast} / M_h$')\n ax_phi.set_ylabel(labels['galaxy_lf'])\n ax_phi.set_yscale('log')\n ax_bet.set_ylabel(r'$\\beta$')\n\n ax_MsMUV.set_ylabel(r'$\\log_{10} M_{\\ast} / M_{\\odot}$')\n ax_MsMUV.set_xlabel(r'$M_{1500}$')\n\n ax_AUV.set_xlabel(r'$M_{\\mathrm{UV}}$')\n ax_AUV.set_ylabel(r'$A_{\\mathrm{UV}}$')\n \n ax_smf.set_yscale('log')\n \n ax_sfms.set_xlabel(r'$M_{\\ast} / M_{\\odot}$')\n ax_sfms.set_ylabel(r'$\\dot{M}_{\\ast} \\ [M_{\\odot} \\ \\mathrm{yr}^{-1}]$')\n \n ax_sfrd.set_xlabel(r'$z$')\n ax_sfrd.set_ylabel(labels['sfrd'])\n ax_sfrd.set_ylim(1e-4, 1e-1)\n\n #ax_lae_z.set_xlabel(r'$z$')\n #ax_lae_z.set_ylabel(r'$X_{\\mathrm{LAE}}, 1 - f_{\\mathrm{cov}}$')\n #ax_lae_m.set_xlabel(r'$M_{\\mathrm{UV}}$')\n #ax_lae_m.set_ylabel(r'$X_{\\mathrm{LAE}}, 1 - f_{\\mathrm{cov}}$')\n \n\n ##\n # CALIBRATION DATA\n ##\n ax_phi.set_title('Calibration Data', fontsize=18)\n ax_bet.set_xlabel(r'$M_{\\mathrm{UV}}$')\n ax_phi.set_ylabel(labels['lf'])\n ax_bet.set_ylabel(r'$\\beta$')\n\n ax_phi.legend(loc='lower right', fontsize=8)\n ax_smf.legend(loc='lower left', fontsize=8)\n ax_bet.legend(loc='lower left', fontsize=8)\n ax_AUV.legend(loc='upper right', fontsize=8)\n #ax_lae_z.legend(loc='upper left', frameon=True, fontsize=6)\n #ax_lae_m.legend(loc='upper left', frameon=True, fontsize=6)\n ax_MsMUV.legend(loc='upper right', fontsize=8)\n \n \n self._MegaPlotLimitsAndTicks(kw)\n"
] | [
[
"numpy.concatenate",
"numpy.max",
"numpy.zeros_like",
"numpy.mean",
"numpy.ma.array",
"numpy.ma.is_masked",
"numpy.ones_like",
"numpy.arange",
"numpy.diff",
"matplotlib.gridspec.GridSpec",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.min",
"numpy.logspace",
"numpy.log10",
"numpy.logical_and",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.sum",
"numpy.abs",
"numpy.array_equal",
"numpy.isfinite",
"numpy.sort",
"matplotlib.pyplot.draw",
"matplotlib.cm.get_cmap"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wiprodevnet/iot-ml-deployment | [
"78b6f3ad9d65f4f2c3e1654b8f01be941a17b572"
] | [
"IOTMLmodel/iot_ml_model.py"
] | [
"\"\"\"\r\nLinear Regression model data preperation and storage as pickle model\r\n\"\"\"\r\nimport pickle\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn import metrics\r\n\r\n# Linear Regression Model implementation\r\ndef train_test_rmse(feature_cols):\r\n \"\"\" Define a function that accepts a list of features and returns model and RMSE \"\"\"\r\n # create X and y\r\n x_input = ENERGY[feature_cols]\r\n y_output = ENERGY.Global_active_power\r\n x_train, x_test, y_train, y_test = train_test_split(x_input, y_output, random_state=123)\r\n linreg = LinearRegression()\r\n linreg.fit(x_train, y_train)\r\n y_pred = linreg.predict(x_test)\r\n\r\n return (linreg, np.sqrt(metrics.mean_squared_error(y_test, y_pred)))\r\n\r\n# IOT dataset downloaded from https://data.world/databeats/household-power-consumption\r\n\r\n# Read the csv file to pandas dataframe & clean the data\r\n# As there are some na values with ? 
mark these as na values\r\nENERGY = pd.read_csv('household_power_consumption.csv', na_values='?')\r\n\r\n# Drop null values\r\nENERGY.dropna(inplace=True)\r\n\r\n# Add the 'datetime' as the index cloumn and derive a new 'hour' column\r\nENERGY['datetime'] = pd.to_datetime(ENERGY['Date']+' '+ ENERGY['Time'])\r\n# Set datetime name as index column\r\nENERGY.set_index(\"datetime\", inplace=True)\r\n# Remove old date time columns\r\nENERGY = ENERGY.drop(['Date', 'Time'], axis=1)\r\n# Derive time of the day (hour) to add this as a feature for linear regression\r\nENERGY['hour'] = ENERGY.index.hour\r\n\r\n# Create dummy variables based on hour\r\nHOUR_DUMMIES = pd.get_dummies(ENERGY.hour, prefix='hour')\r\n\r\n# Drop redundant categorical column - hour_0\r\n# Note for N categories, it is enough to have N-1 as features\r\nHOUR_DUMMIES.drop(HOUR_DUMMIES.columns[0], axis=1, inplace=True)\r\n\r\n# Concatenate the original DataFrame and the dummy DataFrame\r\nENERGY = pd.concat([ENERGY, HOUR_DUMMIES], axis=1)\r\n\r\n# Model #1 Simple model\r\nprint('Model #1 Simple univariant model ')\r\n# Train and test the liner regression model using 'hour' as feature\r\n# and active power as output\r\n(MODEL, ERROR) = train_test_rmse(['hour'])\r\nINPUT = np.array([[0]])\r\n\r\n# Print the mse error on the test data\r\nprint('MSE', ERROR)\r\n# Predict the out output for given input\r\nPREDICTION = MODEL.predict(INPUT)\r\n# Print the linear MODEL cofficients and output\r\nprint('intercept_', MODEL.intercept_)\r\nprint('coef_', MODEL.coef_)\r\nprint('Predicted', PREDICTION)\r\n\r\n#Store the model as a pkl for deployment\r\nFILE = open('iotmodel.pkl', 'wb')\r\npickle.dump(MODEL, FILE)\r\nFILE.close()\r\n\r\n# Model #2 Higher polynomial model with categorical columns\r\nprint('Model #2 Multiple categories model ')\r\nFEATURE_COL = ENERGY.columns[ENERGY.columns.str.startswith('hour_')]\r\nprint(FEATURE_COL)\r\n\r\n# Let us train and test the liner regression model using all categorical cloumns 
'hour_' as feature\r\n# and active power as output\r\n(MODEL, ERROR) = train_test_rmse(FEATURE_COL)\r\n\r\nINPUT = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]])\r\n\r\n# Print the mse error on the test data\r\nprint('MSE', ERROR)\r\n# Predict the out output for given input\r\nPREDICTION = MODEL.predict(INPUT)\r\n# Print the linear MODEL cofficients and output\r\nprint('intercept_', MODEL.intercept_)\r\nprint('coef_', MODEL.coef_)\r\nprint('Predicted', PREDICTION)\r\n\r\n# Store the second model as a pkl for deployment\r\nFILE = open('iotmodel2.pkl', 'wb')\r\npickle.dump(MODEL, FILE)\r\nFILE.close()\r\n"
] | [
[
"pandas.concat",
"pandas.read_csv",
"pandas.to_datetime",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.mean_squared_error",
"sklearn.linear_model.LinearRegression",
"numpy.array",
"pandas.get_dummies"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
dkdocs/nextera-demo | [
"9881f42c68e96d48ae9c8a33eb11752d62c6e9d8"
] | [
"mvtowerdetection/test/show_annotated_images.py"
] | [
"import os\nimport cv2\nimport numpy as np\nimport skimage.io as io\n\nfrom train.utils import get_subfiles\n\nfrom dotenv import load_dotenv\nload_dotenv()\n\n\nUnlabelled = [0, 0, 0]\nPole = [255, 255, 255]\n\nCOLOR_DICT = np.array([Pole, Unlabelled])\n\n\n# get binary image\ndef get_binary_image(img):\n # convert to grayscale\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # convert to binary\n retval, binary_img = cv2.threshold(gray_img, float(os.getenv('BINARY_THRESHOLD')), 255, cv2.THRESH_BINARY)\n\n binary_img = np.repeat(binary_img[:, :, np.newaxis], 3, axis=2)\n return binary_img\n\n\n# apply color map to grayscale image\ndef apply_color_map(img):\n # convert to grayscale\n gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # apply color map\n img_colormap = cv2.applyColorMap(gray_img, int(os.getenv('COLORMAP')))\n\n return img_colormap\n\n\n# rescale image\ndef rescale_image(img, target_size):\n rescaled_img = cv2.resize(img, (target_size[1], target_size[0]))\n return rescaled_img\n\n\n# get target size\ndef get_image_dimensions(path):\n img = cv2.imread(path)\n return img.shape\n\n\n# display images and their masks\ndef save_images_and_masks(imagesdir, masksdir, suffix='_predict', show=False,\n save=False):\n all_mask_images = get_subfiles(masksdir)\n counter = 1\n for maskfilename in all_mask_images:\n image_id = maskfilename.split('.')[0].replace(suffix, '')\n imagefilename = image_id + '.jpg'\n\n img = cv2.imread(imagesdir + '/' + imagefilename)\n mask = cv2.imread(masksdir + '/' + maskfilename)\n\n if os.getenv('COLORMAP_FLAG').upper() == 'TRUE':\n mask_modified = apply_color_map(mask)\n else:\n mask_modified = get_binary_image(mask)\n\n combined_img = np.concatenate((img, mask_modified), axis=1)\n\n if show:\n cv2.imshow(\"annotations - Filename: {}\".format(image_id), combined_img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n\n if save:\n print('\\ncounter: {}'.format(counter))\n print('results/combined/{}'.format(str(image_id) + '.png'))\n 
cv2.imwrite(os.path.join(os.getenv('COMBINED_IMAGES_PATH'), (str(image_id) + '.png')), combined_img)\n counter += 1\n\n# get image from model predictions\ndef label_visualize(num_class, color_dict, img):\n img = img[:, :, 0] if len(img.shape) == 3 else img\n img_out = np.zeros(img.shape + (3,))\n for i in range(num_class):\n img_out[img == i, :] = color_dict[i]\n return img_out / 255\n\n\n# save predictions on test data as image files\ndef save_result(save_path, npyfile, test_filenames, flag_multi_class=False, num_class=2):\n for i, item in enumerate(npyfile):\n img = label_visualize(num_class, COLOR_DICT, item) if flag_multi_class else item[:, :, 0]\n rescaled_img = rescale_image(img, target_size=get_image_dimensions(os.path.join(os.getenv('TEST_IMAGES_PATH'), test_filenames[i])))\n print(get_image_dimensions(os.path.join(os.getenv('TEST_IMAGES_PATH'), test_filenames[i])))\n print(rescaled_img.shape)\n\n io.imsave(os.path.join(save_path, \"{}_predict.png\".format(test_filenames[i].split('.')[0])), rescaled_img)\n"
] | [
[
"numpy.concatenate",
"numpy.repeat",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gaolympie/datacraft-workshop-docker | [
"570133f1ff54ac5251c30cb9b0815bc7f0c08f09"
] | [
"utils/ui.py"
] | [
"import numpy as np\nimport streamlit as st\n\n\nfrom models.NaiveBayes import nb_param_selector\nfrom models.NeuralNetwork import nn_param_selector\nfrom models.RandomForet import rf_param_selector\nfrom models.DecisionTree import dt_param_selector\nfrom models.LogisticRegression import lr_param_selector\nfrom models.KNearesNeighbors import knn_param_selector\nfrom models.SVC import svc_param_selector\nfrom models.GradientBoosting import gb_param_selector\n\nfrom models.utils import model_imports\nfrom utils.functions import img_to_bytes\n\n\ndef introduction():\n st.title(\"**Welcome to playground 🧪**\")\n st.subheader(\n \"\"\"\n This is a place where you can get familiar with machine learning models directly from your browser\n \"\"\"\n )\n\n st.markdown(\n \"\"\"\n - 🗂️ Choose a dataset\n - ⚙️ Pick a model and set its hyper-parameters\n - 📉 Train it and check its performance metrics and decision boundary on train and test data\n - 🩺 Diagnose possible overitting and experiment with other settings\n -----\n \"\"\"\n )\n\n\ndef dataset_selector():\n dataset_container = st.sidebar.beta_expander(\"Configure a dataset\", True)\n with dataset_container:\n dataset = st.selectbox(\"Choose a dataset\", (\"moons\", \"circles\", \"blobs\"))\n n_samples = st.number_input(\n \"Number of samples\",\n min_value=50,\n max_value=1000,\n step=10,\n value=300,\n )\n\n train_noise = st.slider(\n \"Set the noise (train data)\",\n min_value=0.01,\n max_value=0.2,\n step=0.005,\n value=0.06,\n )\n test_noise = st.slider(\n \"Set the noise (test data)\",\n min_value=0.01,\n max_value=1.0,\n step=0.005,\n value=train_noise,\n )\n\n if dataset == \"blobs\":\n n_classes = st.number_input(\"centers\", 2, 5, 2, 1)\n else:\n n_classes = None\n\n return dataset, n_samples, train_noise, test_noise, n_classes\n\n\ndef model_selector():\n model_training_container = st.sidebar.beta_expander(\"Train a model\", True)\n with model_training_container:\n model_type = st.selectbox(\n \"Choose a 
model\",\n (\n \"Logistic Regression\",\n \"Decision Tree\",\n \"Random Forest\",\n \"Gradient Boosting\",\n \"Neural Network\",\n \"K Nearest Neighbors\",\n \"Gaussian Naive Bayes\",\n \"SVC\",\n ),\n )\n\n if model_type == \"Logistic Regression\":\n model = lr_param_selector()\n\n elif model_type == \"Decision Tree\":\n model = dt_param_selector()\n\n elif model_type == \"Random Forest\":\n model = rf_param_selector()\n\n elif model_type == \"Neural Network\":\n model = nn_param_selector()\n\n elif model_type == \"K Nearest Neighbors\":\n model = knn_param_selector()\n\n elif model_type == \"Gaussian Naive Bayes\":\n model = nb_param_selector()\n\n elif model_type == \"SVC\":\n model = svc_param_selector()\n\n elif model_type == \"Gradient Boosting\":\n model = gb_param_selector()\n\n return model_type, model\n\n\ndef generate_snippet(\n model, model_type, n_samples, train_noise, test_noise, dataset, degree\n):\n train_noise = np.round(train_noise, 3)\n test_noise = np.round(test_noise, 3)\n\n model_text_rep = repr(model)\n model_import = model_imports[model_type]\n\n if degree > 1:\n feature_engineering = f\"\"\"\n >>> for d in range(2, {degree+1}):\n >>> x_train = np.concatenate((x_train, x_train[:, 0] ** d, x_train[:, 1] ** d))\n >>> x_test= np.concatenate((x_test, x_test[:, 0] ** d, x_test[:, 1] ** d))\n \"\"\"\n\n if dataset == \"moons\":\n dataset_import = \"from sklearn.datasets import make_moons\"\n train_data_def = (\n f\"x_train, y_train = make_moons(n_samples={n_samples}, noise={train_noise})\"\n )\n test_data_def = f\"x_test, y_test = make_moons(n_samples={n_samples // 2}, noise={test_noise})\"\n\n elif dataset == \"circles\":\n dataset_import = \"from sklearn.datasets import make_circles\"\n train_data_def = f\"x_train, y_train = make_circles(n_samples={n_samples}, noise={train_noise})\"\n test_data_def = f\"x_test, y_test = make_circles(n_samples={n_samples // 2}, noise={test_noise})\"\n\n elif dataset == \"blobs\":\n dataset_import = \"from 
sklearn.datasets import make_blobs\"\n train_data_def = f\"x_train, y_train = make_blobs(n_samples={n_samples}, clusters=2, noise={train_noise* 47 + 0.57})\"\n test_data_def = f\"x_test, y_test = make_blobs(n_samples={n_samples // 2}, clusters=2, noise={test_noise* 47 + 0.57})\"\n\n snippet = f\"\"\"\n >>> {dataset_import}\n >>> {model_import}\n >>> from sklearn.metrics import accuracy_score, f1_score\n\n >>> {train_data_def}\n >>> {test_data_def}\n {feature_engineering if degree > 1 else ''} \n >>> model = {model_text_rep}\n >>> model.fit(x_train, y_train)\n \n >>> y_train_pred = model.predict(x_train)\n >>> y_test_pred = model.predict(x_test)\n >>> train_accuracy = accuracy_score(y_train, y_train_pred)\n >>> test_accuracy = accuracy_score(y_test, y_test_pred)\n \"\"\"\n return snippet\n\n\ndef polynomial_degree_selector():\n return st.sidebar.number_input(\"Highest polynomial degree\", 1, 10, 1, 1)\n\n\ndef footer():\n st.sidebar.markdown(\"---\")\n st.sidebar.markdown(\n \"\"\"\n App forked from [ahmedbesbes/playground](https://github.com/ahmedbesbes/playground)\n \n [<img src='data:image/png;base64,{}' class='img-fluid' width=25 height=25>](https://github.com/ahmedbesbes/playground) <small> Playground 0.1.1 | April 2021</small>\"\"\".format(\n img_to_bytes(\"./images/github.png\")\n \n ),\n unsafe_allow_html=True,\n )"
] | [
[
"numpy.round"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
huozhanfeng/spark | [
"42904b8d013e71d03e301c3da62e33b4cc2eb54e"
] | [
"python/pyspark/mllib/util.py"
] | [
"#\n# Licensed to the Apache Software Foundation (ASF) under one or more\n# contributor license agreements. See the NOTICE file distributed with\n# this work for additional information regarding copyright ownership.\n# The ASF licenses this file to You under the Apache License, Version 2.0\n# (the \"License\"); you may not use this file except in compliance with\n# the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport numpy as np\nimport warnings\n\nfrom pyspark.mllib.linalg import Vectors, SparseVector\nfrom pyspark.mllib.regression import LabeledPoint\nfrom pyspark.mllib._common import _convert_vector, _deserialize_labeled_point\nfrom pyspark.rdd import RDD\nfrom pyspark.serializers import NoOpSerializer\n\n\nclass MLUtils(object):\n\n \"\"\"\n Helper methods to load, save and pre-process data used in MLlib.\n \"\"\"\n\n @staticmethod\n def _parse_libsvm_line(line, multiclass):\n warnings.warn(\"deprecated\", DeprecationWarning)\n return _parse_libsvm_line(line)\n\n @staticmethod\n def _parse_libsvm_line(line):\n \"\"\"\n Parses a line in LIBSVM format into (label, indices, values).\n \"\"\"\n items = line.split(None)\n label = float(items[0])\n nnz = len(items) - 1\n indices = np.zeros(nnz, dtype=np.int32)\n values = np.zeros(nnz)\n for i in xrange(nnz):\n index, value = items[1 + i].split(\":\")\n indices[i] = int(index) - 1\n values[i] = float(value)\n return label, indices, values\n\n @staticmethod\n def _convert_labeled_point_to_libsvm(p):\n \"\"\"Converts a LabeledPoint to a string in LIBSVM format.\"\"\"\n items = [str(p.label)]\n v = _convert_vector(p.features)\n if type(v) 
== np.ndarray:\n for i in xrange(len(v)):\n items.append(str(i + 1) + \":\" + str(v[i]))\n elif type(v) == SparseVector:\n nnz = len(v.indices)\n for i in xrange(nnz):\n items.append(str(v.indices[i] + 1) + \":\" + str(v.values[i]))\n else:\n raise TypeError(\"_convert_labeled_point_to_libsvm needs either ndarray or SparseVector\"\n \" but got \" % type(v))\n return \" \".join(items)\n\n @staticmethod\n def loadLibSVMFile(sc, path, multiclass=False, numFeatures=-1, minPartitions=None):\n warnings.warn(\"deprecated\", DeprecationWarning)\n return loadLibSVMFile(sc, path, numFeatures, minPartitions)\n\n @staticmethod\n def loadLibSVMFile(sc, path, numFeatures=-1, minPartitions=None):\n \"\"\"\n Loads labeled data in the LIBSVM format into an RDD of\n LabeledPoint. The LIBSVM format is a text-based format used by\n LIBSVM and LIBLINEAR. Each line represents a labeled sparse\n feature vector using the following format:\n\n label index1:value1 index2:value2 ...\n\n where the indices are one-based and in ascending order. This\n method parses each line into a LabeledPoint, where the feature\n indices are converted to zero-based.\n\n @param sc: Spark context\n @param path: file or directory path in any Hadoop-supported file\n system URI\n @param numFeatures: number of features, which will be determined\n from the input data if a nonpositive value\n is given. 
This is useful when the dataset is\n already split into multiple files and you\n want to load them separately, because some\n features may not present in certain files,\n which leads to inconsistent feature\n dimensions.\n @param minPartitions: min number of partitions\n @return: labeled data stored as an RDD of LabeledPoint\n\n >>> from tempfile import NamedTemporaryFile\n >>> from pyspark.mllib.util import MLUtils\n >>> tempFile = NamedTemporaryFile(delete=True)\n >>> tempFile.write(\"+1 1:1.0 3:2.0 5:3.0\\\\n-1\\\\n-1 2:4.0 4:5.0 6:6.0\")\n >>> tempFile.flush()\n >>> examples = MLUtils.loadLibSVMFile(sc, tempFile.name).collect()\n >>> tempFile.close()\n >>> type(examples[0]) == LabeledPoint\n True\n >>> print examples[0]\n (1.0,(6,[0,2,4],[1.0,2.0,3.0]))\n >>> type(examples[1]) == LabeledPoint\n True\n >>> print examples[1]\n (-1.0,(6,[],[]))\n >>> type(examples[2]) == LabeledPoint\n True\n >>> print examples[2]\n (-1.0,(6,[1,3,5],[4.0,5.0,6.0]))\n \"\"\"\n\n lines = sc.textFile(path, minPartitions)\n parsed = lines.map(lambda l: MLUtils._parse_libsvm_line(l))\n if numFeatures <= 0:\n parsed.cache()\n numFeatures = parsed.map(lambda x: -1 if x[1].size == 0 else x[1][-1]).reduce(max) + 1\n return parsed.map(lambda x: LabeledPoint(x[0], Vectors.sparse(numFeatures, x[1], x[2])))\n\n @staticmethod\n def saveAsLibSVMFile(data, dir):\n \"\"\"\n Save labeled data in LIBSVM format.\n\n @param data: an RDD of LabeledPoint to be saved\n @param dir: directory to save the data\n\n >>> from tempfile import NamedTemporaryFile\n >>> from fileinput import input\n >>> from glob import glob\n >>> from pyspark.mllib.util import MLUtils\n >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, 1.23), (2, 4.56)])), \\\n LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]\n >>> tempFile = NamedTemporaryFile(delete=True)\n >>> tempFile.close()\n >>> MLUtils.saveAsLibSVMFile(sc.parallelize(examples), tempFile.name)\n >>> ''.join(sorted(input(glob(tempFile.name + 
\"/part-0000*\"))))\n '0.0 1:1.01 2:2.02 3:3.03\\\\n1.1 1:1.23 3:4.56\\\\n'\n \"\"\"\n lines = data.map(lambda p: MLUtils._convert_labeled_point_to_libsvm(p))\n lines.saveAsTextFile(dir)\n\n @staticmethod\n def loadLabeledPoints(sc, path, minPartitions=None):\n \"\"\"\n Load labeled points saved using RDD.saveAsTextFile.\n\n @param sc: Spark context\n @param path: file or directory path in any Hadoop-supported file\n system URI\n @param minPartitions: min number of partitions\n @return: labeled data stored as an RDD of LabeledPoint\n\n >>> from tempfile import NamedTemporaryFile\n >>> from pyspark.mllib.util import MLUtils\n >>> examples = [LabeledPoint(1.1, Vectors.sparse(3, [(0, -1.23), (2, 4.56e-7)])), \\\n LabeledPoint(0.0, Vectors.dense([1.01, 2.02, 3.03]))]\n >>> tempFile = NamedTemporaryFile(delete=True)\n >>> tempFile.close()\n >>> sc.parallelize(examples, 1).saveAsTextFile(tempFile.name)\n >>> loaded = MLUtils.loadLabeledPoints(sc, tempFile.name).collect()\n >>> type(loaded[0]) == LabeledPoint\n True\n >>> print examples[0]\n (1.1,(3,[0,2],[-1.23,4.56e-07]))\n >>> type(examples[1]) == LabeledPoint\n True\n >>> print examples[1]\n (0.0,[1.01,2.02,3.03])\n \"\"\"\n minPartitions = minPartitions or min(sc.defaultParallelism, 2)\n jSerialized = sc._jvm.PythonMLLibAPI().loadLabeledPoints(sc._jsc, path, minPartitions)\n serialized = RDD(jSerialized, sc, NoOpSerializer())\n return serialized.map(lambda bytes: _deserialize_labeled_point(bytearray(bytes)))\n\n\ndef _test():\n import doctest\n from pyspark.context import SparkContext\n globs = globals().copy()\n # The small batch size here ensures that we see multiple batches,\n # even in these small test examples:\n globs['sc'] = SparkContext('local[2]', 'PythonTest', batchSize=2)\n (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)\n globs['sc'].stop()\n if failure_count:\n exit(-1)\n\n\nif __name__ == \"__main__\":\n _test()\n"
] | [
[
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ScottHull/fEquilibrium | [
"fbc352484d0e60d6b224950f81d6fd730e36cb82"
] | [
"box/Box.py"
] | [
"import os\nimport matplotlib as mpl\n\nmpl.use('Qt5Agg')\nos.sys.path.append(os.path.dirname(os.path.abspath('.')))\nimport numpy as np\nimport pandas as pd\nfrom random import randint\nimport moviepy.editor as mpy\nfrom dynamics.Movement import move_particle\nfrom dynamics.Energy import energy, thermal_eq\nfrom thermodynamics.Solution import solution\nfrom meta.Console import console\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport shutil\nimport sys\nimport matplotlib.cm as cm\nimport matplotlib.colors\nimport matplotlib.colorbar\nfrom math import pi\nfrom numbers import Number\nimport ast\nfrom collections import Counter\n\n\n# TODO: update some methods to class methods to avoid outside interference\nclass box:\n def __init__(self, length, width, height, space_resolution, model_time, visualize_system=False,\n object_history=False, visualize_neighbors=False, animate_neighbors=False):\n \"\"\"\n Instantiates the box.\n :param length: length of the system, in m\n :param width: width of the system, in m\n :param height: height of the system, in m\n :param space_resolution: spatial resolution of the system, in m\n :param model_time: initial time of the system, in 'years ago'\n :param visualize_system: optional parameter to turn on movie generation of the evolution of the system\n\n \"\"\"\n console.pm_header(\"\\n\\n\\nfEquilibrium\\nScott D. Hull, 2017\\n\\n\")\n console.pm_stat(\"Instantiating box. 
Please sit tight.\")\n self.visualize_neighbors = visualize_neighbors\n self.animate_neighbors = animate_neighbors\n self.length = length # x values of box\n self.width = width # y values of box\n self.height = height # z values of box\n self.model_base = height # sets the model base as the coords directly above the boundary layer\n self.boundary_vals = [] # stores limits of boundaries so that box integrity can be verified\n self.space_resolution = space_resolution # the spatial resolution of the box\n self.model_time = float(model_time) # the amount of time the model will run\n self.initial_time = float(model_time) # the initial value of self.model_time\n # generates all possible coordinate points within the box\n self.coords = self.generate_coordinate_points(length=self.length, width=self.width, height=self.height,\n space_resolution=self.space_resolution)\n self.visualize_system = visualize_system # True/False, create animations of the box?\n self.object_history = object_history # creates an output file that tracks objects with time\n # this is the central dataframe of the model\n # highly critical that this is accessible in memory for all processes\n self.space = pd.DataFrame({\n 'coord_index': [str(i) for i in self.coords],\n 'object_id': np.NAN, # randomly generated object id tag to identify unique elements in box\n 'object': np.NAN, # name of the object, as defined in self.physical_parameters\n 'x_coords': [float(i[0]) for i in self.coords],\n 'y_coords': [float(i[1]) for i in self.coords],\n 'z_coords': [float(i[2]) for i in self.coords],\n 'nearest_neighbors': np.NAN, # x, y, and z neighbors in each direction\n 'object_radius': np.NAN, # in m\n 'density': np.NAN, # in kg/m^3\n 'temperature': np.NAN, # in K\n 'pressure': [(1 * 10 ** 9) for i in self.coords],\n # pressure in pascals, in order to work with ideal gas law\n 'object_velocity': [float(0) for i in self.coords],\n 'x_direct': np.NAN,\n 'y_direct': np.NAN,\n 'z_direct': np.NAN,\n 'potential_energy': 
np.NAN,\n 'kinetic_energy': np.NAN, # in J\n 'total_energy_released': np.NAN, # in J\n 'mass': np.NAN, # in kg\n 'volume': np.NAN, # in m^3\n 'drag_force': np.NAN, # in N, the drag force exerted on sinking objects\n 'buoyant_force': np.NAN, # in N, the buoyant force exerted on particles (negative = downward buoyant force)\n 'gravitational_force': np.NAN # in N, the force pulling down on the objects due to gravity\n })\n self.num_coords = len(self.coords)\n self.solution = solution(box_length=self.num_coords)\n self.physical_parameters = pd.read_csv(\n os.path.dirname(os.path.abspath('.')) + \"/fEquilibrium/dynamics/physical_parameters.csv\",\n index_col='Material')\n # these are lists and directories for tracking animation frame ordering and storing animation frames\n self.movie_frames1 = []\n self.movie_frames2 = []\n self.movie_frames3 = []\n self.movie_frames4 = []\n if os.path.exists('object_dynamics'):\n shutil.rmtree('object_dynamics')\n if os.path.exists('thermal_equilibrium_heatmap'):\n shutil.rmtree('thermal_equilibrium_heatmap')\n if os.path.exists('nearest_neighbors'):\n shutil.rmtree('nearest_neighbors')\n if os.path.exists('temp_distrib_floor'):\n shutil.rmtree('temp_distrib_floor')\n os.mkdir('object_dynamics')\n os.mkdir('thermal_equilibrium_heatmap')\n os.mkdir('nearest_neighbors')\n os.mkdir('temp_distrib_floor')\n # opens the object history csv so object histories can be written after each time interval\n if self.object_history is True:\n if \"object_history.csv\" in os.listdir(os.getcwd()):\n os.remove(\"object_history.csv\")\n self.object_output = open(\"object_history.csv\", 'w')\n header = ['Model Time']\n for i in self.space.columns.tolist():\n header.append(str(i))\n formatted_header = \",\".join(i for i in header)\n self.object_output.write(\"{}\\n\".format(formatted_header))\n # opens the object velocity csv so object velocities can be written after each time interval\n if 'object_velocities.csv' in os.listdir(os.getcwd()):\n 
os.remove('object_velocities.csv')\n self.velocity_output = open('object_velocities.csv', 'a')\n\n # returns a copy of the self.space dataframe\n def get_box(self):\n return self.space\n\n def classify_neighbors(self, animate_neighbors, visualize_neighbors):\n \"\"\"\n classifies nearest neighbors, primarily for heat equilibrium\n assumption is that each point in the box is only in contact with its nearest neighbor\n this only executes once at the initial model time\n :param animate_neighbors:\n :param visualize_neighbors:\n :return: None\n \"\"\"\n loop_total = len(self.space.index.tolist())\n console.pm_stat(\"Finding nearest neighbors for all points. This may take several minutes...\")\n # print(\"Finding nearest neighbors for all points. This may take several minutes...\")\n min_xcoords = 0.0\n max_xcoords = float(self.length)\n min_ycoords = 0.0\n max_ycoords = float(self.width)\n min_zcoords = 0.0\n max_zcoords = float(self.height)\n # iterates through each coordinate point in the box and identifies the nearest neighbors\n for row in self.space.itertuples():\n index = row.Index\n neighbors = thermal_eq.explicit_nearest_neighboor(system_data=self.space,\n x_coord=self.space['x_coords'][index],\n y_coord=self.space['y_coords'][index],\n z_coord=self.space['z_coords'][index],\n space_resolution=self.space_resolution,\n minx=min_xcoords, maxx=max_xcoords,\n miny=min_ycoords, maxy=max_ycoords,\n minz=min_zcoords, maxz=max_zcoords,\n animate_neighbors=animate_neighbors,\n visualize_neighbors=visualize_neighbors)\n self.space['nearest_neighbors'][index] = str(neighbors)\n console.pm_flush(message=\"Found neighbors for {}/{} coordinate points.\".format(index + 1, loop_total))\n if animate_neighbors is True:\n self.movie_frames3.append('snap_{}-{}-{}.png'.format(self.space['x_coords'][index],\n self.space['y_coords'][index],\n self.space['z_coords'][index]))\n print(\"\")\n if animate_neighbors is True:\n # self.space.to_csv(\"space2_coords_check.csv\")\n import 
moviepy.editor as mpy\n import os\n os.chdir(os.getcwd() + \"/nearest_neighbors\")\n animation = mpy.ImageSequenceClip(self.movie_frames3,\n fps=5,\n load_images=True)\n animation.write_gif('neighbors.gif', fps=5)\n os.chdir(\"..\")\n return None\n\n def generate_coordinate_points(self, length, width, height, space_resolution):\n \"\"\"\n Generates all possible coordinate points within the defined box\n :param length: length of the system, in m\n :param width: width of the system, in m\n :param height: height of the system, in m\n :param space_resolution: spatial resolution of the system, in m\n :return: coords, a list of all coordinate points available in the system\n \"\"\"\n console.pm_stat(\"Generating coordinates...\")\n # print(\"Generating coordinates...\")\n coords = []\n x_coords_range = np.arange(0, round((length + space_resolution), len(str(space_resolution))),\n space_resolution) # generate range of x-coords\n y_coords_range = np.arange(0, round((width + space_resolution), len(str(space_resolution))),\n space_resolution) # generate range of y-coords\n z_coords_range = np.arange(0, round((height + space_resolution), len(str(space_resolution))),\n space_resolution) # generate range of z-coords\n for i in x_coords_range:\n for j in y_coords_range:\n for q in z_coords_range:\n temp_coords = []\n temp_coords.append(round(i, len(str(space_resolution))))\n temp_coords.append(round(j, len(str(space_resolution))))\n temp_coords.append(round(q, len(str(space_resolution))))\n coords.append(temp_coords)\n console.pm_stat(\"Coordinates generated!\")\n # print(\"Coordinates generated!\")\n return coords\n\n def round_coord_arbitrary(self, coordinate, system_data, coordinate_type):\n \"\"\"\n Rounds a calculated coordinate to the nearest one defined by the spatial resolution\n :param coordinate:\n :param system_data:\n :param coordinate_type:\n :return: rounded_coordinate\n \"\"\"\n rounded_coordinate = ''\n found_min = ''\n for i in system_data[coordinate_type]:\n 
attempted_min = abs(coordinate - i)\n if found_min == '':\n found_min = attempted_min\n rounded_coordinate = i\n else:\n if attempted_min < found_min:\n found_min = attempted_min\n rounded_coordinate = i\n return rounded_coordinate\n\n def check_coords(self, x_coord, y_coord, z_coord):\n console.pm_stat(\"Checking if coordinates are valid for object insertion...\")\n x_min, x_max = self.space['x_coords'][0], self.space['x_coords'][len(self.coords) - 1]\n y_min, y_max = self.space['y_coords'][0], self.space['y_coords'][len(self.coords) - 1]\n z_min, z_max = self.space['z_coords'][0], self.space['z_coords'][len(self.coords) - 1]\n if x_coord >= x_min and x_coord <= x_max:\n if y_coord >= y_min and y_coord <= y_max:\n if z_coord >= z_min and z_coord <= z_max:\n console.pm_stat(\"Coordinates validated for object insertion!\")\n return True\n else:\n console.pm_err(\"Coordinates invalid!\")\n return False\n\n def generate_object_id(self, matrix):\n \"\"\"\n Generates object ID codes so that specific objects and materials can be tracked\n Object/matrial types are unique and coded by the a letter specifying the general type followed by a\n unique number combination\n The general object/material types are coded by the first letter as follows:\n - 'A' = object\n - 'B' = matrix\n - 'C' = boundary\n :param matrix:\n :return: object_id\n \"\"\"\n\n def random_gen(object_identifier):\n object_id = object_identifier + str(randint(0, len(self.coords) + len(self.coords)))\n return object_id\n\n if matrix is True:\n object_id = random_gen(object_identifier='B') # matrix material objects begin with a B\n while object_id in self.space['object_id'].tolist():\n object_id = random_gen(object_identifier='B') # matrix material objects begin with a B\n return object_id\n else:\n object_id = random_gen(object_identifier='A') # non-matrix material objects begin with a A\n while object_id in self.space['object_id'].tolist():\n object_id = random_gen(object_identifier='A') # matrix material 
objects begin with a A\n return object_id\n\n # def insert_at_coord(self, x_coord, y_coord, z_coord):\n # \"\"\"\n # :param x_coord:\n # :param y_coord:\n # :param z_coord:\n # :return: row, the row index value in the self.space dateframe of the coordinate in question\n # \"\"\"\n # space_copy = self.space.copy(deep=True)\n # space_copy.set_index(['x_coords', 'y_coords', 'z_coords'], inplace=True)\n # row = space_copy.loc(x_coord, y_coord, z_coord)\n #\n # return row\n\n def insert_object(self, object, x_coord, y_coord, z_coord, object_radius, composition, initial_temperature):\n \"\"\"\n Allows the insertion of an object into the box\n The object should NOT be inserted into coordinates occupied by boundaries\n This function should be called AFTER matrix insertion--else it will be overwritten\n :param object:\n :param x_coord:\n :param y_coord:\n :param z_coord:\n :param object_radius:\n :param composition:\n :param initial_temperature:\n :return: None\n \"\"\"\n console.pm_stat(\"Inserting object...\")\n if object in self.physical_parameters.index:\n if self.check_coords(x_coord=x_coord, y_coord=y_coord,\n z_coord=z_coord) is True: # checks to verify that coordinates exist in space\n for row in self.space.itertuples():\n index = row.Index\n if self.space['x_coords'][index] == x_coord:\n if self.space['y_coords'][index] == y_coord:\n if self.space['z_coords'][index] == z_coord: # verifies that coordinates match to Dataframe\n self.space['object'][\n index] = object # the name of the object, as defined in dynamics/physical_parameters.csv\n self.space['object_id'][index] = self.generate_object_id(\n matrix=False) # generates object ID\n self.space['object_radius'][index] = object_radius # in m\n self.space['volume'][index] = (4 / 3) * pi * (\n object_radius) ** 3 # assume volume of object is a perfect sphere\n self.space['mass'][index] = self.physical_parameters['Density'][object] * \\\n self.space['volume'][index] # mass = density * volume\n 
self.space['temperature'][index] = initial_temperature\n self.space['object_density'] = float(self.space['mass'][index]) / ((4 / 3) * pi *\n float(self.space[\n 'object_radius'][\n index]) ** 3) # assume object is a perfect sphere\n self.solution.create_solution(box=self.space, composition=composition, row=index,\n object=object)\n console.pm_flush(\"Inserted object ({}) at coordinates: x:{} y:{}, z:{}\".format(\n self.space['object'][index],\n self.space['x_coords'][index],\n self.space['y_coords'][index],\n self.space['z_coords'][index]))\n break\n\n else:\n console.pm_err(\"Could not insert object! Outside of defined coordinate points!\")\n sys.exit(1)\n else:\n console.pm_err(\"Object not defined in {}! Cannot insert object!\".format(\n os.getcwd() + \"/dynamics/physical_parameters.csv\"))\n sys.exit(1)\n return None\n\n # TODO: allow for the definition of matrix temperature or a matrix temperature gradient (starting temp, temp gradient\n def insert_matrix(self, matrix_material, composition, initial_temperature, z_range=[0, 0]):\n \"\"\"\n This function allows for the insertion of a matrix material over a given z-range\n This function should be called FIRST when constructing the box\n :param matrix_material:\n :param composition:\n :param initial_temperature:\n :param z_range: The depths at which the matrix should be inserted into the box\n :return: None\n \"\"\"\n console.pm_stat(\"Inserting matrix...\")\n if matrix_material in self.physical_parameters.index:\n if z_range[1] == 0:\n # z range is a list of two numbers, the minimum depth at the index 0, and the maximum depth at index 1\n for row in self.space.itertuples():\n index = row.Index\n self.space['object_id'][index] = self.generate_object_id(matrix=True)\n self.space['object'][index] = matrix_material\n self.space['temperature'][index] = initial_temperature\n self.solution.create_solution(box=self.space, composition=composition, row=index,\n object=matrix_material)\n console.pm_flush(\n \"Inserted 
matrix ({}) at coordinates: x:{} y:{}, z:{}\".format(self.space['object'][index],\n self.space['x_coords'][index],\n self.space['y_coords'][index],\n self.space['z_coords'][\n index]))\n else:\n for row in self.space.itertuples():\n index = row.Index\n if round(z_range[0], len(str(self.space_resolution))) <= self.space['z_coords'][index] <= round(\n z_range[1], len(str(self.space_resolution))):\n self.space['object_id'][index] = self.generate_object_id(matrix=True)\n self.space['object'][index] = matrix_material\n self.space['temperature'][index] = initial_temperature\n self.solution.create_solution(box=self.space, composition=composition, row=index,\n object=matrix_material)\n console.pm_flush(\n \"Inserted matrix ({}) at coordinates: x:{} y:{}, z:{}\".format(self.space['object'][index],\n self.space['x_coords'][index],\n self.space['y_coords'][index],\n self.space['z_coords'][\n index]))\n print(\"\")\n console.pm_stat(\"Matrix material(s) ({}) inserted!\".format(matrix_material))\n\n else:\n console.pm_err(\"Matrix material not defined in {}! 
Cannot insert matrix material!\".format(\n os.getcwd() + \"/dynamics/physical_parameters.csv\"))\n sys.exit(1)\n return None\n\n def insert_boundary(self, temperature, z_range, boundary_location='bottom', flux=True):\n \"\"\"\n Insert a boundary layer for the purpose of regulating z-gradients in heat exchange.\n It is recommended that a boundary layer is inserted\n :param temperature:\n :param z_range:\n :param boundary_location: Either the boundary layer is on the 'top' or the 'bottom' of the model.\n The boundary location defaults to bottom if not explicitly stated.\n :param flux: allow heat flux from the boundary layers to permeate the rest of the model\n :return:\n \"\"\"\n if z_range[1] != 0:\n self.boundary_vals.append(z_range[0])\n self.boundary_vals.append(z_range[1])\n if boundary_location == 'bottom':\n self.model_base = z_range[\n 0] # base of model considered to be the top (highest z-coordinate) of boundary layer\n for row in self.space.itertuples():\n index = row.Index\n if round(z_range[0], len(str(self.space_resolution))) <= self.space['z_coords'][index] <= round(\n z_range[1], len(str(self.space_resolution))):\n self.space['object_id'][index] = 'C'\n self.space['object'][index] = \"Boundary\"\n self.space['temperature'][index] = temperature\n console.pm_flush(\"Inserted boundary at coordinates: x:{} y:{}, z:{}\".format(\n self.space['x_coords'][index],\n self.space['y_coords'][index],\n self.space['z_coords'][\n index]))\n print(\"\")\n console.pm_stat(\"Boundary layer inserted between z-range: {}m-{}m!\".format(z_range[0], z_range[1]))\n\n def visualize_box(self):\n \"\"\"\n Constructs animation frames that allows for the visualization of the box\n :return: None\n \"\"\"\n # creates the 3D diapir movement animation frames\n if self.visualize_system != False:\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.set_xlim(xmin=min(self.space['x_coords']), xmax=max(self.space['x_coords']))\n ax.set_ylim(ymin=min(self.space['y_coords']), 
ymax=max(self.space['y_coords']))\n ax.set_zlim(zmin=min(self.space['z_coords']), zmax=max(self.space['z_coords']))\n for row in self.space.itertuples():\n index = row.Index\n x = self.space['x_coords'][index]\n y = self.space['y_coords'][index]\n z = self.space['z_coords'][index]\n try:\n if str(self.space['object_id'][index])[0] == 'A':\n ax.scatter3D(x, y, z, color='b', s=self.space['object_radius'][index] * 100)\n except:\n self.space.to_csv(\"alskdfjakhsdf.csv\")\n sys.exit(1)\n ax.set_title(\"System 3D Heatmap at Time {}\".format(self.model_time))\n ax.set_xlabel(\"Box Length (x) (m)\")\n ax.set_ylabel(\"Box Width (y) (m)\")\n ax.set_zlabel(\"Box Height (z) (m)\")\n ax.invert_zaxis()\n fig.savefig(os.getcwd() + '/object_dynamics/snap_{}.png'.format(self.model_time), format='png')\n fig.clf()\n self.movie_frames1.append('snap_{}.png'.format(self.model_time))\n console.pm_stat(\"System snapshot created: {}\".format('snap_{}.png'.format(self.model_time)))\n\n # creates 3D heatmap animation frames\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.set_xlim(xmin=min(self.space['x_coords']), xmax=max(self.space['x_coords']))\n ax.set_ylim(ymin=min(self.space['y_coords']), ymax=max(self.space['y_coords']))\n ax.set_zlim(zmin=min(self.space['z_coords']), zmax=max(self.space['z_coords']))\n for row in self.space.itertuples():\n index = row.Index\n x = self.space['x_coords'][index]\n y = self.space['y_coords'][index]\n z = self.space['z_coords'][index]\n # velocity_x = self.space['x_direct'][row]\n try:\n if str(self.space['object_id'][index][0]) == 'A':\n # print(\"Plotted object at: x:{} y:{} z:{}.\".format(x, y, z))\n ax.scatter3D(x, y, z, color='b', s=self.space['object_radius'][index] * 100)\n except:\n self.space.to_csv(\"alskdfjakhsdf.csv\")\n sys.exit(1)\n # norm_colors = mpl.colors.Normalize(vmin=self.space['temperature'].min(), vmax=self.space['temperature'].max())\n norm_colors = mpl.colors.Normalize(vmin=1900, vmax=2200)\n colorsmap = 
matplotlib.cm.ScalarMappable(norm=norm_colors, cmap='jet')\n colorsmap.set_array(self.space['temperature'])\n ax.scatter(self.space['x_coords'], self.space['y_coords'], self.space['z_coords'], marker='s', s=140,\n c=self.space['temperature'], cmap='jet', alpha=0.50)\n cb = fig.colorbar(colorsmap)\n ax.set_title(\"System 3D Heatmap at Time {}\".format(self.model_time))\n ax.set_xlabel(\"Box Length (x) (m)\")\n ax.set_ylabel(\"Box Width (y) (m)\")\n ax.set_zlabel(\"Box Height (z) (m)\")\n ax.invert_zaxis()\n fig.savefig(os.getcwd() + '/thermal_equilibrium_heatmap/snap_{}.png'.format(self.model_time), format='png')\n self.movie_frames2.append('snap_{}.png'.format(self.model_time))\n fig.clf()\n\n # creates the 3D model base trisurf animation frames\n x_coords = []\n y_coords = []\n temperature = []\n for row in self.space.itertuples():\n index = row.Index\n surface_zcoord = round((self.model_base - self.space_resolution), len(str(self.space_resolution)))\n if float(self.space['z_coords'][index]) == surface_zcoord:\n x_coords.append(self.space['x_coords'][index])\n y_coords.append(self.space['y_coords'][index])\n temperature.append(self.space['temperature'][index])\n fig = plt.figure()\n ax = Axes3D(fig)\n ax.plot_trisurf(x_coords, y_coords, temperature)\n ax.set_xlabel(\"Box Length (x) (m)\")\n ax.set_ylabel(\"Box Width (y) (m)\")\n ax.set_zlabel(\"Temperature (degK)\")\n ax.set_zlim(zmin=1990, zmax=2500)\n ax.set_title(\"Temperature Distribution at Time {} At Base of Model\".format(self.model_time))\n fig.savefig(os.getcwd() + '/temp_distrib_floor/snap_{}.png'.format(self.model_time), format='png')\n self.movie_frames4.append('snap_{}.png'.format(self.model_time))\n fig.clf()\n return None\n\n @staticmethod\n def grab_row_index_by_coord(system_data, x_coord, y_coord, z_coord):\n \"\"\"\n Returns the index of the row in the instance's Pandas dataframe by associating with x, y, and z coordinates stored\n in the dataframe.\n :param system_data:\n :param x_coord:\n 
:param y_coord:\n :param z_coord:\n :return: row, the index\n \"\"\"\n for row in system_data.itertuples():\n index = row.Index\n if system_data['x_coords'][index] == x_coord:\n if system_data['y_coords'][index] == y_coord:\n if system_data['z_coords'][index] == z_coord:\n return index\n\n @staticmethod\n def swap_rows(system_data, update_space, from_row_index, to_row_index):\n stationary_columns = ['x_coords', 'y_coords', 'z_coords', 'coord_index', 'nearest_neighbors']\n for i in system_data:\n if i not in stationary_columns:\n cp_from = system_data[i][from_row_index]\n cp_to = system_data[i][to_row_index]\n system_data[i][to_row_index] = cp_from\n system_data[i][from_row_index] = cp_to\n\n return system_data\n\n\n def replace_fromobject(self, system_data, update_space, from_object_index, to_object_index, stationary_columns):\n found = False\n from_neighbors = ast.literal_eval(system_data['nearest_neighbors'][from_object_index])\n avg_temps_list = []\n avg_object_list = []\n for i in from_neighbors:\n for z in from_neighbors[i]:\n if \"+\" in z or \"-\" in z:\n if len(from_neighbors[i][z]['index']) != 0:\n temp = system_data['temperature'][from_neighbors[i][z]['index']].values.tolist()[0]\n avg_obj = system_data['object'][from_neighbors[i][z]['index']].values.tolist()[0]\n if avg_obj != 'Boundary':\n avg_temps_list.append(temp)\n avg_object_list.append(avg_obj)\n avg_temp = sum(avg_temps_list) / len(avg_temps_list)\n common_obj = Counter(avg_object_list).most_common()[0][0]\n for i in from_neighbors:\n for z in from_neighbors[i]:\n obj = system_data['object'][from_neighbors[i][z]['index']].values.tolist()[0]\n if obj == common_obj:\n for q in system_data:\n if q not in stationary_columns:\n system_data[q][from_object_index] = system_data[q][from_neighbors[i][z]['index']].values.tolist()[0]\n system_data['temperature'][from_object_index] = avg_temp\n found = True\n break\n if found is True:\n break\n\n def define_path(self, start, end, length, width, height):\n 
\"\"\"\n Get the list of coordinates between two points in the box.\n :param start: a list of path start coordinates, [length, width, height] (i.e. [x, y, z])\n :param end: a list of path end coordinates, [length, width, height] (i.e. [x, y, z])\n :param length: x-coordinates\n :param width: y-coordinates\n :param height: z-coordinates\n :return: path, the list of lists of coordinates in the path\n \"\"\"\n start_x = start[0]\n start_y = start[1]\n start_z = start[2]\n end_x = end[0]\n end_y = end[1]\n end_z = end[2]\n path_x = end_x - start_x\n path_y = end_y - start_y\n path_z = end_z - start_z\n\n\n\n def merge_objects(self, to_object_index, from_object_index, system_data, update_space):\n \"\"\"\n When two objects of the same type occupy the same point in coordinate space, they will merge.\n The deepest object (i.e. the \"to\" destination of the \"from\" diapir) will inherit all properties.\n The object doing the sinking will merge to the deeper object + disappear.\n :param to_object_index:\n :param from_object_index:\n :param system_data:\n :return:\n \"\"\"\n stationary_columns = ['x_coords', 'y_coords', 'z_coords', 'coord_index',\n 'nearest_neighbors'] # columns that are not swapped\n additive_columns = ['mass', 'volume'] # columns that contain additive data when diapirs merge\n console.pm_stat(\"Objects {} and {} will merge to object {}!\".format(system_data['object_id'][from_object_index],\n system_data['object_id'][\n to_object_index], system_data['object_id'][\n to_object_index]))\n for i in system_data:\n # makes sure that the column is an additive property\n if (i not in stationary_columns) and (i in additive_columns):\n # adds the values\n system_data[i][to_object_index] = system_data[i][to_object_index] + system_data[i][\n from_object_index]\n # takes an average of temperatures of the merging objects for the temperature of the merged object\n # this should eventually be weighted by object radius\n system_data['object_radius'][to_object_index] = 
((system_data['volume'][to_object_index] * 3) / (4 * pi))**(1/3) # V = (4/3)*pi*r^3 --> r = ((3V)/(4pi))^(1/3)\n system_data['temperature'][to_object_index] = (system_data['temperature'][from_object_index] +\n system_data['temperature'][to_object_index]) / 2\n self.replace_fromobject(system_data=system_data, update_space=system_data, from_object_index=from_object_index,\n stationary_columns=stationary_columns, to_object_index=to_object_index)\n return system_data\n\n # TODO: seperate velocity calculations from system movement so space dataframe can be updated and moved according to velocity contents\n @classmethod\n def calculate_velocities(cls):\n pass\n\n # # TODO: calculate the distance between the two points, and then find nearest coordinate neighbors along the path to account for x,y,z\n # # TODO: right now, just operates in z-direction. will have to rewrite entire method if lateral motion is to occur\n # @classmethod\n # def gather_path_coords(cls, system_data, from_zcoord, to_zcoord, x_coord, y_coord, from_xcoord=None, to_xcoord=None,\n # from_ycoord=None, to_ycoord=None):\n # path_coords = []\n # for row in system_data.index:\n # unique_path_coords = [] # x coord at index 0, y coord at 1, z coord at 2\n # if float(system_data['x_coord'][row]) == float(x_coord) and float(system_data['y_coord']) == float(y_coord):\n # if float(system_data['z_coord'][row]) >= float(from_zcoord) and float(system_data['z_coord'][row]) <= to_zcoord:\n # # assumption is that z axis is inverted, but z values increase with decreasing depth\n # unique_path_coords.append(float(x_coord))\n # unique_path_coords.append(float(y_coord))\n # unique_path_coords.append(float(system_data['z_coord'][row]))\n # path_coords.append(unique_path_coords)\n # return path_coords\n\n def move_systems(self, system_data, update_space, deltaTime, box_height, space_resolution,\n default_matrix_material='Silicate Liquid'):\n \"\"\"\n Allows for the movement of objects to occur within the box if objects are 
gravitationally unstable\n :param system_data:\n :param update_space:\n :param deltaTime:\n :param box_height:\n :param space_resolution:\n :param default_matrix_material:\n :return: update_space_copy, a copy of the self.space dataframe with updated object/matrix positions\n \"\"\"\n # update_space_copy = update_space.copy(deep=True)\n inactive_objects = []\n update_space_copy = self.space\n for row in system_data.itertuples():\n index = row.Index\n # object_id's that begin with 'A' are objects and will be free to move\n if str(system_data['object_id'][index][0]) == 'A' and str(system_data['object_id'][index]) not in inactive_objects:\n inactive_objects.append(str(system_data['object_id'][index]))\n curr_x_coords = system_data['x_coords'][index]\n curr_y_coords = system_data['y_coords'][index]\n curr_z_coords = system_data['z_coords'][index]\n object_velocity = 0\n\n matrix_material = default_matrix_material # the default matrix matrial until overwritten\n matrix_material_temp = 0.0\n matrix_material_pressure = 0.0\n # assumption is that object will travel through matrix most like that occupying z coord below it.\n # code block below attempts to idenfity that material\n if (system_data['z_coords'][index] + space_resolution) in system_data['z_coords']:\n searchfor_coord = (system_data['z_coords'][index] + space_resolution)\n for row2 in system_data.itertuples():\n index2 = row2.Index\n if system_data['z_coords'][index2] == searchfor_coord and system_data['y_coords'][index2] \\\n == curr_y_coords and system_data['x_coords'][index2] == curr_x_coords:\n matrix_material = system_data['object'][index2]\n matrix_material_temp = system_data['temperature'][index2]\n matrix_material_pressure = system_data['pressure'][index2]\n break\n object_velocity = move_particle(body_type=system_data['object'][index],\n system_params=system_data).stokes_settling(\n object=system_data['object'][index], matrix_material=matrix_material,\n matrix_material_temp=matrix_material_temp, 
matrix_material_pressure=matrix_material_pressure,\n object_radius=system_data['object_radius'][index])\n z_dis_obj_travel = object_velocity * deltaTime\n updated_x_coord = round(system_data['x_coords'][index], len(str(space_resolution)))\n updated_y_coord = round(system_data['y_coords'][index], len(str(space_resolution)))\n # round the z-coordinate to the nearest point within the spatial resolution\n updated_z_coord = round(self.round_coord_arbitrary(\n coordinate=(z_dis_obj_travel + system_data['z_coords'][index]),\n system_data=system_data, coordinate_type='z_coords'), len(str(space_resolution)))\n rounded_z_distance_travelled = round(updated_z_coord - curr_z_coords, len(str(\n space_resolution))) # use this distance for distance travelled, as it is more self-consistent within the model\n # check to see if object travels into boundary layer. if so, put it in nearest point within spatial resolution ABOVE boundary layer\n if round(rounded_z_distance_travelled + curr_z_coords, len(str(space_resolution))) >= self.model_base:\n updated_z_coord = round(self.model_base - self.space_resolution,\n len(str(space_resolution))) # fix the z-coord\n rounded_z_distance_travelled = round(updated_z_coord - curr_z_coords,\n len(str(space_resolution))) # fix the distance travelled\n # checks to make sure that the space/time resolution was big enough for the object to move. 
if not, velocity/distance_travelled = 0\n if rounded_z_distance_travelled == 0:\n object_velocity = 0\n z_dis_obj_travel = 0\n # get the index of the coordinate point where the object will travel to\n to_row_index = self.grab_row_index_by_coord(system_data=system_data,\n x_coord=updated_x_coord,\n y_coord=updated_y_coord,\n z_coord=updated_z_coord)\n from_row_index = self.grab_row_index_by_coord(system_data=system_data,\n x_coord=system_data['x_coords'][index],\n y_coord=system_data['y_coords'][index],\n z_coord=system_data['z_coords'][index])\n # update the copy of the dataframe with the appropriate changes\n if object_velocity != 0:\n console.pm_flush(\n \"Object {} will move! ({},{},{} to {},{},{})\".format(system_data['object_id'][index],\n curr_x_coords, curr_y_coords,\n curr_z_coords, updated_x_coord,\n updated_y_coord, updated_z_coord))\n # stokes_data returns degK, F_g, F_b, F_d\n stokes_data = energy().stokes_frictional_energy(\n object=system_data['object'][index], matrix_material=matrix_material,\n body_radius=system_data['object_radius'][index],\n body_mass=system_data['mass'][index], distance_travelled=rounded_z_distance_travelled,\n object_velocity=object_velocity)\n system_data['temperature'][index] = float(\n system_data['temperature'][index]) + stokes_data[\n 0] # grabs degK from stokes_data & adjusts the temperature\n system_data['drag_force'][index] = float(stokes_data[1]) # gets drag force and adds it to the dataframe\n system_data['buoyant_force'][index] = float(\n stokes_data[2]) # gets buoyant force and adds it to the dataframe\n system_data['gravitational_force'][index] = float(\n stokes_data[3]) # gets gravitational force and adds it to the dataframe\n system_data['object_velocity'][index] = object_velocity\n system_data['z_direct'][index] = object_velocity\n system_data['potential_energy'][index] = energy().potential_energy(mass=system_data['mass'][index],\n height=system_data['z_coords'][\n index],\n box_height=box_height)\n 
system_data['kinetic_energy'][index] = energy().kinetic_energy(mass=system_data['mass'][index],\n velocity=system_data['object_velocity'][\n index])\n if object_velocity != 0:\n console.pm_stat(\"Object will move! {} ({}) will move from x:{} y:{} z:{} to x:{} y:{} z:{} (velocity: {})\".format(\n system_data['object_id'][index], system_data['object'][index], system_data['x_coords'][index],\n system_data['y_coords'][index], system_data['z_coords'][index], updated_x_coord, updated_y_coord,\n updated_z_coord, system_data['object_velocity'][index]))\n # check to see if two objects of the same type will collide\n # if two objects of the same type collide, they will merge\n # else, just swap points with the matrix material at the destination coordinate point\n if (system_data['object'][from_row_index] == system_data['object'][to_row_index]) and \\\n (system_data['object_id'][from_row_index] != system_data['object_id'][to_row_index]):\n update_space_copy = self.merge_objects(to_object_index=to_row_index, from_object_index=from_row_index, system_data=system_data, update_space=update_space)\n else:\n if object_velocity != 0:\n update_space_copy = self.swap_rows(system_data=system_data, update_space=update_space,\n from_row_index=from_row_index, to_row_index=to_row_index)\n print(\"\")\n return update_space_copy\n\n\n def certify_box(self):\n for row in self.space.itertuples():\n index = row.Index\n try:\n if 'A' in self.space['object_id'][index]:\n pass\n except:\n console.pm_err(\n \"Box integrity check failed. Please check your z-ranges to make sure all \"\n \"coordinate spaces are filled..\")\n sys.exit(1)\n res = [self.width, self.length, self.height]\n for i in res:\n if (i % self.space_resolution) - self.space_resolution >= 0:\n console.pm_err(\"Box integrity check failed. 
Your space resolution is not a multiple of \"\n \"the box length, width, and/or height.\")\n sys.exit(1)\n for i in self.boundary_vals:\n if (i % self.space_resolution) - self.space_resolution >= 0:\n console.pm_err(\"Box integrity check failed. Your space resolution is not a multiple of \"\n \"the boundary layer limit(s).\")\n sys.exit(1)\n console.pm_stat(\"Box integrity confirmed. Calculations allowed to proceed.\")\n\n\n\n\n # TODO: update x and y coords\n def update_system(self, auto_update=True, deltaTime=1.0):\n \"\"\"\n Updates the system thermal/dynamic/chemical state at each time interval\n :param auto_update:\n :param deltaTime:\n :return: self.model_time, self.space\n \"\"\"\n console.pm_stat(\"Model time at: {}\".format(self.model_time))\n # update_space = self.space.copy(deep=True)\n # this section only executes at the initial time--no object or thermal movement occurs here\n if self.model_time == self.initial_time:\n # check the integrity of the box before time and neighbor identifification allowed to progress\n self.certify_box()\n # if box integrity confirmed, proceed to nearest neighbor identification\n self.classify_neighbors(visualize_neighbors=self.visualize_neighbors,\n animate_neighbors=self.animate_neighbors)\n # create an initial snapshot of the box\n self.visualize_box()\n # writes an object history output file if flagged in box setup\n if self.object_history is True:\n for row in self.space.itertuples():\n index = row.Index\n if 'A' in self.space['object_id'][index]:\n contents = []\n contents.append(str(self.model_time))\n for i in self.space:\n contents.append(str(self.space[i][index]))\n formatted_contents = \",\".join(i.replace(\",\", \":\") for i in contents)\n self.object_output.write(\"{}\\n\".format(formatted_contents))\n # executes when the model time is exhausted--writes output files and animations and then ends the simulation\n elif self.model_time <= 0:\n self.visualize_box()\n console.pm_stat(\"Model at minimum time!\")\n if 
self.visualize_system is True:\n console.pm_stat(\"Writing animations...\")\n\n # dynamics animation\n os.chdir(os.getcwd() + '/object_dynamics')\n animation = mpy.ImageSequenceClip(self.movie_frames1,\n fps=round((self.initial_time / (self.initial_time / 3))),\n load_images=True)\n os.chdir('..')\n animation.write_videofile('object_dynamics.mp4',\n fps=round((self.initial_time / (self.initial_time / 3))), audio=False)\n animation.write_gif('object_dynamics.gif',\n fps=round((self.initial_time / (self.initial_time / 3))))\n console.pm_stat(\"Animation created & available in {}!\".format(os.getcwd()))\n\n # 3d heatmap animation\n os.chdir(os.getcwd() + '/thermal_equilibrium_heatmap')\n animation = mpy.ImageSequenceClip(self.movie_frames2,\n fps=round((self.initial_time / (self.initial_time / 3))),\n load_images=True)\n os.chdir('..')\n animation.write_videofile('thermal_equilibrium_heatmap.mp4',\n fps=round((self.initial_time / (self.initial_time / 3))), audio=False)\n animation.write_gif('thermal_equilibrium_heatmap.gif',\n fps=round((self.initial_time / (self.initial_time / 3))))\n console.pm_stat(\"Animation created & available in {}!\".format(os.getcwd()))\n\n # 3d model base heat distribution animation\n os.chdir(os.getcwd() + '/temp_distrib_floor')\n animation = mpy.ImageSequenceClip(self.movie_frames4,\n fps=round((self.initial_time / (self.initial_time / 3))),\n load_images=True)\n os.chdir('..')\n animation.write_videofile('temp_distrib_floor.mp4',\n fps=round((self.initial_time / (self.initial_time / 3))), audio=False)\n animation.write_gif('temp_distrib_floor.gif',\n fps=round((self.initial_time / (self.initial_time / 3))))\n console.pm_stat(\"Animation created & available in {}!\".format(os.getcwd()))\n\n # writes the central pandas dataframe to 'space.csv'. 
most critical model info contained here\n self.space.to_csv(\"space.csv\")\n # writes the chemical compositions to 'solution.csv'\n self.solution.get_solution().to_csv(\"solution.csv\")\n # writes the object history output file\n if self.object_history is True:\n for row in self.space.itertuples():\n index = row.Index\n if 'A' in self.space['object_id'][index]:\n contents = []\n contents.append(str(self.model_time))\n for i in self.space:\n contents.append(str(self.space[i][index]))\n formatted_contents = \",\".join(i.replace(\",\", \":\") for i in contents)\n self.object_output.write(\"{}\\n\".format(formatted_contents))\n if self.object_output is True:\n self.object_output.close()\n return self.model_time, self.space\n else:\n # models the object movement\n self.move_systems(system_data=self.space, update_space=None, deltaTime=deltaTime,\n box_height=self.height, space_resolution=self.space_resolution)\n update_space = self.space.copy(deep=True)\n # updates chemical compositions\n update_solution = self.solution.update_solution(deltaTime=deltaTime)\n # models thermal equilibrium\n therm_eq_update_space = thermal_eq().D3_thermal_eq(system_data=update_space, deltaTime=deltaTime,\n space_resolution=self.space_resolution)\n for row in update_space.itertuples():\n index = row.Index\n if 'A' in update_space['object_id'][index]:\n self.velocity_output.write(\"\\n{}\".format(update_space['object_velocity'][index]))\n self.visualize_box()\n self.space = update_space\n if self.object_history is True:\n for row in self.space.itertuples():\n index = row.Index\n if 'A' in self.space['object_id'][index]:\n contents = []\n contents.append(str(self.model_time))\n for i in self.space:\n contents.append(str(self.space[i][index]))\n formatted_contents = \",\".join(i.replace(\",\", \":\") for i in contents)\n self.object_output.write(\"{}\\n\".format(formatted_contents))\n # auto-update calculates the appropriate deltaTime, if one is not defined\n if auto_update is True:\n if 
self.model_time == deltaTime:\n self.model_time -= deltaTime\n self.update_system(auto_update=False, deltaTime=deltaTime)\n elif self.model_time > deltaTime:\n self.model_time -= deltaTime\n self.update_system(auto_update=auto_update, deltaTime=deltaTime)\n else:\n return self.model_time, self.space\n else:\n return self.model_time, self.space\n\n\n"
] | [
[
"matplotlib.use",
"matplotlib.colors.Normalize",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tmtmaj/deep-high-resolution-net.pytorch | [
"59513d90c30fb736e2a7f75186303eb5824e6be8"
] | [
"cocoeval_dacon.py"
] | [
"__author__ = 'tsungyi'\n\nimport numpy as np\nimport datetime\nimport time\nfrom collections import defaultdict\nfrom . import mask as maskUtils\nimport copy\n\nclass COCOeval:\n # Interface for evaluating detection on the Microsoft COCO dataset.\n #\n # The usage for CocoEval is as follows:\n # cocoGt=..., cocoDt=... # load dataset and results\n # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object\n # E.params.recThrs = ...; # set parameters as desired\n # E.evaluate(); # run per image evaluation\n # E.accumulate(); # accumulate per image results\n # E.summarize(); # display summary metrics of results\n # For example usage see evalDemo.m and http://mscoco.org/.\n #\n # The evaluation parameters are as follows (defaults in brackets):\n # imgIds - [all] N img ids to use for evaluation\n # catIds - [all] K cat ids to use for evaluation\n # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation\n # recThrs - [0:.01:1] R=101 recall thresholds for evaluation\n # areaRng - [...] A=4 object area ranges for evaluation\n # maxDets - [1 10 100] M=3 thresholds on max detections per image\n # iouType - ['segm'] set iouType to 'segm', 'bbox' or 'keypoints'\n # iouType replaced the now DEPRECATED useSegm parameter.\n # useCats - [1] if true use category labels for evaluation\n # Note: if useCats=0 category labels are ignored as in proposal scoring.\n # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.\n #\n # evaluate(): evaluates detections on every image and every category and\n # concats the results into the \"evalImgs\" with fields:\n # dtIds - [1xD] id for each of the D detections (dt)\n # gtIds - [1xG] id for each of the G ground truths (gt)\n # dtMatches - [TxD] matching gt id at each IoU or 0\n # gtMatches - [TxG] matching dt id at each IoU or 0\n # dtScores - [1xD] confidence of each dt\n # gtIgnore - [1xG] ignore flag for each gt\n # dtIgnore - [TxD] ignore flag for each dt at each IoU\n #\n # accumulate(): accumulates the per-image, 
per-category evaluation\n # results in \"evalImgs\" into the dictionary \"eval\" with fields:\n # params - parameters used for evaluation\n # date - date evaluation was performed\n # counts - [T,R,K,A,M] parameter dimensions (see above)\n # precision - [TxRxKxAxM] precision for every evaluation setting\n # recall - [TxKxAxM] max recall for every evaluation setting\n # Note: precision and recall==-1 for settings with no gt objects.\n #\n # See also coco, mask, pycocoDemo, pycocoEvalDemo\n #\n # Microsoft COCO Toolbox. version 2.0\n # Data, paper, and tutorials available at: http://mscoco.org/\n # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.\n # Licensed under the Simplified BSD License [see coco/license.txt]\n def __init__(self, cocoGt=None, cocoDt=None, iouType='segm'):\n '''\n Initialize CocoEval using coco APIs for gt and dt\n :param cocoGt: coco object with ground truth annotations\n :param cocoDt: coco object with detection results\n :return: None\n '''\n if not iouType:\n print('iouType not specified. 
use default iouType segm')\n self.cocoGt = cocoGt # ground truth COCO API\n self.cocoDt = cocoDt # detections COCO API\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results [KxAxI] elements\n self.eval = {} # accumulated evaluation results\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n self.params = Params(iouType=iouType) # parameters\n self._paramsEval = {} # parameters for evaluation\n self.stats = [] # result summarization\n self.ious = {} # ious between all gts and dts\n if not cocoGt is None:\n self.params.imgIds = sorted(cocoGt.getImgIds())\n self.params.catIds = sorted(cocoGt.getCatIds())\n\n\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n if p.useCats:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n else:\n gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))\n dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(gts, self.cocoGt)\n _toMask(dts, self.cocoDt)\n # set ignore flag\n for gt in gts:\n gt['ignore'] = gt['ignore'] if 'ignore' in gt else 0\n gt['ignore'] = 'iscrowd' in gt and gt['iscrowd']\n if p.iouType == 'keypoints':\n gt['ignore'] = (gt['num_keypoints'] == 0) or gt['ignore']\n self._gts = defaultdict(list) # gt for evaluation\n self._dts = defaultdict(list) # dt for evaluation\n for gt in gts:\n self._gts[gt['image_id'], gt['category_id']].append(gt)\n for dt in dts:\n self._dts[dt['image_id'], dt['category_id']].append(dt)\n self.evalImgs = defaultdict(list) # per-image per-category evaluation results\n 
self.eval = {} # accumulated evaluation results\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n if p.iouType == 'segm' or p.iouType == 'bbox':\n computeIoU = self.computeIoU\n elif p.iouType == 'keypoints':\n computeIoU = self.computeOks\n self.ious = {(imgId, catId): computeIoU(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n\n evaluateImg = self.evaluateImg\n maxDet = p.maxDets[-1]\n self.evalImgs = [evaluateImg(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = 
[g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def computeOks(self, imgId, catId):\n p = self.params\n # dimention here should be Nxm\n gts = self._gts[imgId, catId]\n dts = self._dts[imgId, catId]\n inds = np.argsort([-d['score'] for d in dts], kind='mergesort')\n dts = [dts[i] for i in inds]\n if len(dts) > p.maxDets[-1]:\n dts = dts[0:p.maxDets[-1]]\n # if len(gts) == 0 and len(dts) == 0:\n if len(gts) == 0 or len(dts) == 0:\n return []\n ious = np.zeros((len(dts), len(gts)))\n sigmas = p.kpt_oks_sigmas\n vars = (sigmas * 2)**2\n k = len(sigmas)\n # compute oks between each detection and ground truth object\n for j, gt in enumerate(gts):\n # create bounds for ignore regions(double the gt bbox)\n g = np.array(gt['keypoints'])\n xg = g[0::3]; yg = g[1::3]; vg = g[2::3]\n k1 = np.count_nonzero(vg > 0)\n bb = gt['bbox']\n x0 = bb[0] - bb[2]; x1 = bb[0] + bb[2] * 2\n y0 = bb[1] - bb[3]; y1 = bb[1] + bb[3] * 2\n for i, dt in enumerate(dts):\n d = np.array(dt['keypoints'])\n xd = d[0::3]; yd = d[1::3]\n if k1>0:\n # measure the per-keypoint distance if keypoints visible\n dx = xd - xg\n dy = yd - yg\n else:\n # measure minimum distance to keypoints in (x0,y0) & (x1,y1)\n z = np.zeros((k))\n dx = np.max((z, x0-xd),axis=0)+np.max((z, xd-x1),axis=0)\n dy = np.max((z, y0-yd),axis=0)+np.max((z, yd-y1),axis=0)\n e = (dx**2 + dy**2) / vars / (gt['area']+np.spacing(1)) / 2\n if k1 > 0:\n e=e[vg > 0]\n ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]\n return ious\n\n def evaluateImg(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n if p.useCats:\n gt = self._gts[imgId,catId]\n dt = self._dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in 
self._gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return None\n\n for g in gt:\n if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):\n g['_ignore'] = 1\n else:\n g['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')\n gt = [gt[i] for i in gtind]\n dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in dtind[0:maxDet]]\n iscrowd = [int(o['iscrowd']) for o in gt]\n # load computed ious\n ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]\n\n T = len(p.iouThrs)\n G = len(gt)\n D = len(dt)\n gtm = np.zeros((T,G))\n dtm = np.zeros((T,D))\n gtIg = np.array([g['_ignore'] for g in gt])\n dtIg = np.zeros((T,D))\n if not len(ious)==0:\n for tind, t in enumerate(p.iouThrs):\n for dind, d in enumerate(dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for gind, g in enumerate(gt):\n # if this gt already matched, and not a crowd, continue\n if gtm[tind,gind]>0 and not iscrowd[gind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and gtIg[m]==0 and gtIg[gind]==1:\n break\n # continue to next gt unless better match made\n if ious[dind,gind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious[dind,gind]\n m=gind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n dtIg[tind,dind] = gtIg[m]\n dtm[tind,dind] = gt[m]['id']\n gtm[tind,m] = d['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))\n dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': 
maxDet,\n 'dtIds': [d['id'] for d in dt],\n 'gtIds': [g['id'] for g in gt],\n 'dtMatches': dtm,\n 'gtMatches': gtm,\n 'dtScores': [d['score'] for d in dt],\n 'gtIgnore': gtIg,\n 'dtIgnore': dtIg,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results...')\n tic = time.time()\n if not self.evalImgs:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['dtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds 
= np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n\n dtm = np.concatenate([e['dtMatches'][:,0:maxDet] for e in E], axis=1)[:,inds]\n dtIg = np.concatenate([e['dtIgnore'][:,0:maxDet] for e in E], axis=1)[:,inds]\n gtIg = np.concatenate([e['gtIgnore'] for e in E])\n npig = np.count_nonzero(gtIg==0 )\n if npig == 0:\n continue\n tps = np.logical_and( dtm, np.logical_not(dtIg) )\n fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg) )\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n self.eval = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Average Precision' if ap == 1 else 'Average Recall'\n typeStr 
= '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n def _summarizeDets():\n stats = np.zeros((12,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[2])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[2])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[2])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[2])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[2])\n stats[10] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[2])\n stats[11] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[2])\n return stats\n def _summarizeKps():\n stats = np.zeros((10,))\n stats[0] = _summarize(1, maxDets=20)\n stats[1] = _summarize(1, maxDets=20, iouThr=.5)\n stats[2] = _summarize(1, maxDets=20, iouThr=.75)\n stats[3] = _summarize(1, maxDets=20, areaRng='medium')\n stats[4] = _summarize(1, maxDets=20, areaRng='large')\n stats[5] = 
_summarize(0, maxDets=20)\n stats[6] = _summarize(0, maxDets=20, iouThr=.5)\n stats[7] = _summarize(0, maxDets=20, iouThr=.75)\n stats[8] = _summarize(0, maxDets=20, areaRng='medium')\n stats[9] = _summarize(0, maxDets=20, areaRng='large')\n return stats\n if not self.eval:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n elif iouType == 'keypoints':\n summarize = _summarizeKps\n self.stats = summarize()\n\n def __str__(self):\n self.summarize()\n\nclass Params:\n '''\n Params for coco evaluation api\n '''\n def setDetParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [1, 10, 100]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [0 ** 2, 32 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'small', 'medium', 'large']\n self.useCats = 1\n\n def setKpParams(self):\n self.imgIds = []\n self.catIds = []\n # np.arange causes trouble. 
the data point on arange is slightly larger than the true value\n self.iouThrs = np.linspace(.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)\n self.recThrs = np.linspace(.0, 1.00, int(np.round((1.00 - .0) / .01)) + 1, endpoint=True)\n self.maxDets = [20]\n self.areaRng = [[0 ** 2, 1e5 ** 2], [32 ** 2, 96 ** 2], [96 ** 2, 1e5 ** 2]]\n self.areaRngLbl = ['all', 'medium', 'large']\n self.useCats = 1\n self.kpt_oks_sigmas = np.array([.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, 1.07, 1.07, 1.07, .87, .87, .89, .89]) / 10.0\n\n def __init__(self, iouType='segm'):\n if iouType == 'segm' or iouType == 'bbox':\n self.setDetParams()\n elif iouType == 'keypoints':\n self.setKpParams()\n else:\n raise Exception('iouType not supported')\n self.iouType = iouType\n # useSegm is deprecated\n self.useSegm = None\n"
] | [
[
"numpy.logical_not",
"numpy.spacing",
"numpy.unique",
"numpy.cumsum",
"numpy.ones",
"numpy.concatenate",
"numpy.round",
"numpy.max",
"numpy.mean",
"numpy.count_nonzero",
"numpy.searchsorted",
"numpy.exp",
"numpy.argsort",
"numpy.repeat",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mastermind88/plotly.py | [
"df19fc702b309586cc24e25373b87e8bdbb3ff60"
] | [
"packages/python/plotly/plotly/tests/test_optional/optional_utils.py"
] | [
"from __future__ import absolute_import\n\nimport numpy as np\n\nfrom plotly import optional_imports\nfrom plotly.tests.utils import is_num_list\nfrom plotly.utils import get_by_path, node_generator\n\nimport copy\n\nmatplotlylib = optional_imports.get_module(\"plotly.matplotlylib\")\n\nif matplotlylib:\n import matplotlib\n\n # Force matplotlib to not use any Xwindows backend.\n matplotlib.use(\"Agg\")\n from plotly.matplotlylib import Exporter, PlotlyRenderer\n\n\ndef run_fig(fig):\n renderer = PlotlyRenderer()\n exporter = Exporter(renderer)\n print(exporter)\n exporter.run(fig)\n return renderer\n\n\nclass NumpyTestUtilsMixin(object):\n \"\"\"Provides some helper functions to make testing easier.\"\"\"\n\n def _format_path(self, path):\n str_path = [repr(p) for p in path]\n return \"[\" + \"][\".join(sp for sp in str_path) + \"]\"\n\n def assert_fig_equal(self, d1, d2, msg=None, ignore=[\"uid\"]):\n \"\"\"\n Helper function for assert_dict_equal\n\n By default removes uid from d1 and/or d2 if present\n then calls assert_dict_equal.\n\n :param (list|tuple) ignore: sequence of key names as\n strings that are removed from both d1 and d2 if\n they exist\n \"\"\"\n # deep copy d1 and d2\n if \"to_plotly_json\" in dir(d1):\n d1_copy = copy.deepcopy(d1.to_plotly_json())\n else:\n d1_copy = copy.deepcopy(d1)\n\n if \"to_plotly_json\" in dir(d2):\n d2_copy = copy.deepcopy(d2.to_plotly_json())\n else:\n d2_copy = copy.deepcopy(d2)\n\n for key in ignore:\n if key in d1_copy.keys():\n del d1_copy[key]\n if key in d2_copy.keys():\n del d2_copy[key]\n\n self.assert_dict_equal(d1_copy, d2_copy, msg=None)\n\n def assert_dict_equal(self, d1, d2, msg=None):\n \"\"\"\n Uses `np.allclose()` on number arrays.\n\n :raises: (AssertionError) Using TestCase's self.failureException\n\n \"\"\"\n self.assertIsInstance(d1, dict, \"First argument is not a dictionary\")\n self.assertIsInstance(d2, dict, \"Second argument is not a dictionary\")\n\n for node, path in node_generator(d1):\n\n # 
first check that this sub-dict is contained in both dicts\n try:\n comp_node = get_by_path(d2, path)\n except (KeyError, IndexError):\n standard_msg = \"Path {} exists in dict 1, but not dict 2.\".format(path)\n self.fail(self._formatMessage(msg, standard_msg))\n self.assertIsInstance(\n comp_node, dict, \"Value at path {} is not a dict.\".format(path)\n )\n\n # check that each key in the first is contained in the second\n for key, val in node.items():\n if isinstance(val, dict):\n continue # this gets tested as its own node\n\n # check that the values at this key are equal\n val_path = path + (key,)\n try:\n comp_val = comp_node[key]\n except KeyError:\n standard_msg = \"Path {} exists in dict 1, but not dict 2.\".format(\n self._format_path(val_path)\n )\n self.fail(self._formatMessage(msg, standard_msg))\n\n if isinstance(val, np.ndarray) or isinstance(comp_val, np.ndarray):\n if np.array_equal(val, comp_val):\n continue\n elif val == comp_val:\n continue\n\n if is_num_list(val) and is_num_list(comp_val):\n if np.allclose(val, comp_val):\n continue\n\n standard_msg = (\n \"Value comparison failed at path {}.\\n\"\n \"{} != {}\".format(self._format_path(val_path), val, comp_val)\n )\n self.fail(self._formatMessage(msg, standard_msg))\n\n # finally, check that keys in the second are in the first\n for key in comp_node:\n val_path = path + (key,)\n if key not in node:\n standard_msg = \"Path {} exists in dict 2, but not dict 1.\".format(\n self._format_path(val_path)\n )\n self.fail(self._formatMessage(msg, standard_msg))\n"
] | [
[
"matplotlib.use",
"numpy.allclose",
"numpy.array_equal"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
weiyw16/pytorch-CycleGAN-and-pix2pix | [
"432a91ee6ca8dc606ba0116b27b0948abc48f295"
] | [
"mkmydata/makeHDF5_fromPng.py"
] | [
"import os\nimport h5py\nfrom PIL import Image\nimport numpy as np\n\nimport struct\nimport matplotlib.pyplot as plt\nimport torch\nimport torchvision.transforms as transforms\nNX = 276\nNZ = 276\nworkpath = \"./\"\nphase = 'train' #['train', 'test']\nmode = 'B' #['A', 'B']\ncontent = ['vx_split', 'vz_split']\n\nfileNum = 0 \nfor root,dirs,files in os.walk(workpath + content[0]): #遍历统计 \n for each in files: \n if each[-4:] == '.png': \n #print( root,dirs,each) \n fileNum += 1 \n#print(fileNum)\n\noutfile = h5py.File(workpath + phase + mode +'.h5', 'w') #./trainB.h5\noutdata = torch.zeros((2, NX, NZ))\n\nfor count in range(1, fileNum):\n for i, iterm in enumerate(content):\n iname = workpath + iterm + \"/\" + mode + str(count) + \".png\" # ./vz_split/B1.png\n data = Image.open(iname).convert('L').resize((NX, NZ)) # 276*276\n outdata[i] = transforms.ToTensor()(data) #np.matrix(data, dtype='float')/255.0\n #infile = Image.open(iname)\n outfile.create_dataset(mode+str(count), data=outdata, dtype=np.float64) #B1.png\noutfile.close()\nworkpath = \"./\"\nphase = 'train' #['train', 'test']\nmode = 'A' #['A', 'B']\ncontent = ['curl_split', 'div_split']\n\nfileNum = 0 \nfor root,dirs,files in os.walk(workpath + content[0]): #遍历统计 \n for each in files: \n if each[-4:] == '.png': \n #print( root,dirs,each) \n fileNum += 1 \n#print(fileNum)\n\noutfile = h5py.File(workpath + phase + mode +'.h5', 'w') #./trainB.h5\noutdata = torch.zeros((2, NX, NZ))\n\nfor count in range(1, fileNum):\n for i, iterm in enumerate(content):\n iname = workpath + iterm + \"/\" + mode + str(count) + \".png\" # ./vz_split/B1.png\n data = Image.open(iname).convert('L').resize((NX, NZ)) # 276*276\n outdata[i] = transforms.ToTensor()(data) #np.matrix(data, dtype='float')/255.0\n #infile = Image.open(iname)\n outfile.create_dataset(mode+str(count), data=outdata, dtype=np.float64) #B1.png\noutfile.close()\n#%hist -f makeHDF5.py\n"
] | [
[
"torch.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mikpom/genontol | [
"ee3e0aaedf59a1bc373aadd3a4f9dbb0107baf94"
] | [
"genontol/tools.py"
] | [
"# Copyright 2015-2019 Jan Daniel Rudolph (@jdrudolph)\n# Modified version Copyright 2019 Mikhail Pomaznoy (@mikpom)\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nimport pandas as pd\nimport numpy as np\n\ndef _ecdf(x):\n nobs = len(x)\n return np.arange(1,nobs+1)/float(nobs)\n\ndef fdrcorrection(pvals, alpha=0.05):\n \"\"\" benjamini hocheberg fdr correction. 
inspired by statsmodels \"\"\"\n pvals = np.asarray(pvals)\n pvals_sortind = np.argsort(pvals)\n pvals_sorted = np.take(pvals, pvals_sortind)\n ecdffactor = _ecdf(pvals_sorted)\n reject = pvals_sorted <= ecdffactor*alpha\n if reject.any():\n rejectmax = max(np.nonzero(reject)[0])\n reject[:rejectmax] = True\n pvals_corrected_raw = pvals_sorted / ecdffactor\n pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1]\n del pvals_corrected_raw\n pvals_corrected[pvals_corrected>1] = 1\n pvals_corrected_ = np.empty_like(pvals_corrected)\n pvals_corrected_[pvals_sortind] = pvals_corrected\n del pvals_corrected\n reject_ = np.empty_like(reject)\n reject_[pvals_sortind] = reject\n return reject_, pvals_corrected_\n"
] | [
[
"numpy.take",
"numpy.nonzero",
"numpy.asarray",
"numpy.empty_like",
"numpy.arange",
"numpy.minimum.accumulate",
"numpy.argsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Westerley/framework_event2vec | [
"f066f150d0a89c10536255ff3ce26aecd967372e"
] | [
"Cluster/kMeans.py"
] | [
"# Import libraries necessary for this project\nimport numpy as np\nfrom scipy.spatial import distance\nimport random\n\nclass KMeans:\n\n \"\"\"K-Means clustering.\n Parameters\n ----------\n n_clusters : int, default: 2\n The number of clusters\n tol : float, default: 0.01\n Relative tolerance with regards to inertia to declare convergence.\n max_iter : int, default: 100\n Maximum number of iterations of the k-means algorithm.\n metric : {'cosine', 'euclidean', ...}\n Compute distance between each pair of the two collections of inputs. See the link <https://docs.scipy.org/doc/scipy/reference/spatial.distance.html>\n init : {'random', 'random++', 'kmeans++'}\n Method for initialization, defaults to 'kmeans++'.\n n_init :\n Number of time the k-means algorithm will be run with different\n centroid seeds. The final results will be the best output of\n n_init consecutive runs in terms of inertia.\n verbose : boolean\n Verbosity mode. \n \n Attributes\n ----------\n clusters : \n Labels of each point\n centroids : \n Coordinates of cluster centers.\n \"\"\"\n \n def __init__(self, n_clusters=2, tol=0.01, max_iter=100, metric=\"cosine\", init=\"kmeans++\", n_init=10, verbose=True):\n self.n_clusters = n_clusters\n self.tol = tol\n self.max_iter = max_iter\n self.init = init\n self.n_init = n_init\n self.metric = metric\n self.verbose = verbose\n \n def fit(self, data):\n\n \"\"\"Compute k-means clustering.\n Parameters\n ----------\n data : array\n Training instances to cluster.\n \"\"\"\n \n self.min_sse = np.inf\n for n_init in range(1, self.n_init + 1):\n centroids = []\n clusters = np.zeros(len(data))\n centroids = self.init_centroid(data)\n \n old_sse = np.inf\n for i in range(self.max_iter):\n for i in range(len(data)):\n distances = distance.cdist(data[i].reshape(1, -1), centroids, self.metric)\n classification = np.argmin(distances)\n clusters[i] = classification\n \n for i in range(self.n_clusters):\n points = [data[j] for j in range(len(data)) if clusters[j] 
== i]\n centroids[i] = np.mean(points, axis=0)\n \n new_sse = 0\n for i in range(self.n_clusters):\n new_sse += np.sum([self.sse(data[j], centroids[i]) for j in range(len(data)) if clusters[j] == i])\n gain = old_sse - new_sse\n \n if gain < self.tol:\n if new_sse < self.min_sse:\n self.min_sse, self.clusters, self.centroids = new_sse, clusters, centroids\n if self.verbose:\n print(\"N_INIT={:2d}, SSE={:10.4f}, GAIN={:10.4f}\".format(n_init, new_sse, gain))\n break\n else:\n old_sse = new_sse\n \n return self\n \n def init_centroid(self, data):\n if self.init == \"random\":\n return data[np.random.choice(len(data), self.n_clusters, replace=False)]\n if self.init == \"random++\":\n centroids = []\n cluster_list = [[] for i in range(self.n_clusters)]\n while len(data) != 0:\n idx_data = np.random.choice(len(data), 1, replace=False)\n idx_cluster = np.random.choice(len(cluster_list), 1, replace=False)\n cluster_list[idx_cluster[0]].append(data[idx_data])\n data = np.delete(data, idx_data, 0)\n\n for i in range(self.n_clusters):\n centroids.append(np.mean(cluster_list[i], axis=0)[0])\n return np.array(centroids)\n if self.init == \"kmeans++\":\n centroids = random.sample(list(data), 1)\n while len(centroids) < self.n_clusters:\n min_distances = [np.min(distance.cdist(data[i].reshape(1, -1), centroids, self.metric)) for i in range(len(data))]\n probs = min_distances / np.sum(min_distances)\n cumprobs = probs.cumsum() # Return the cumulative sum of the elements along a given axis.\n r = random.random()\n ind = np.where(cumprobs >= r)[0][0]\n centroids.append(data[ind])\n return np.array(centroids)\n \n def sse(self, data, cluster):\n return np.sum(distance.cdist(data.reshape(1, -1), cluster.reshape(1, -1), self.metric))"
] | [
[
"numpy.delete",
"numpy.argmin",
"numpy.mean",
"numpy.array",
"numpy.where",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
frikol3000/DinoNN | [
"9722fce4be109feb9036f01af488863ce33f2cae"
] | [
"create_dataset.py"
] | [
"from PIL import ImageGrab\r\nfrom Chrome import Chrome\r\nfrom pynput import keyboard\r\nimport keyboard as kb\r\nimport pandas as pd\r\nfrom config import DATASET_FILE_NAME\r\n\r\njump = 0\r\nrun = 0\r\nfiles = []\r\nlabels = []\r\n\r\n\r\ndef main():\r\n global jump, run\r\n try:\r\n df = pd.read_csv(DATASET_FILE_NAME)\r\n jump = df[df['label'] == 'Jump'].count()['file']\r\n run = df[df['label'] == 'Run'].count()['file']\r\n except FileNotFoundError as filenotfoundex:\r\n print(\"Warning! No such directory was found.\")\r\n\r\n print(jump, run)\r\n chrome = Chrome('https://trex-runner.com/', 'runner-canvas')\r\n\r\n def on_press(key):\r\n global files, labels, jump, run\r\n\r\n if key == keyboard.KeyCode(char='q'):\r\n chrome.quit()\r\n try:\r\n database = pd.read_csv(DATASET_FILE_NAME)\r\n database = database.append(pd.DataFrame({'file': files, 'label': labels}), ignore_index=True)\r\n except FileNotFoundError as filenotfoundex:\r\n print(\"Warning! No such directory was found.\")\r\n database = pd.DataFrame({'file': files, 'label': labels})\r\n database.to_csv(DATASET_FILE_NAME, index=False)\r\n return False\r\n\r\n img = ImageGrab.grab()\r\n img = img.crop(chrome.coords)\r\n\r\n if key == keyboard.Key.space:\r\n jump += 1\r\n files.append(f\"dataset\\\\dino_jump_{jump}.png\")\r\n labels.append('Jump')\r\n img.save(f\"dataset\\\\dino_jump_{jump}.png\")\r\n if key == keyboard.KeyCode(char='e'):\r\n if not kb.is_pressed('space'):\r\n run += 1\r\n files.append(f\"dataset\\\\dino_run_{run}.png\")\r\n labels.append('Run')\r\n img.save(f\"dataset\\\\dino_run_{run}.png\")\r\n\r\n listener = keyboard.Listener(on_press=on_press)\r\n listener.start()\r\n listener.join()\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n"
] | [
[
"pandas.read_csv",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
justin1121/federated | [
"3559af64e8417ccb1b12a9d26f366b721bef021b"
] | [
"tensorflow_federated/python/core/impl/reference_executor.py"
] | [
"# Copyright 2018, The TensorFlow Federated Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"A simple interpreted reference executor.\n\nThis executor is designed for simplicity, not for performance. It is intended\nfor use in unit tests, as the golden standard and point of comparison for other\nexecutors. Unit test suites for other executors should include a test that runs\nthem side by side and compares their results against this executor for a number\nof computations.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\n\nimport numpy as np\nimport six\nfrom six.moves import range\nfrom six.moves import zip\nimport tensorflow as tf\n\nfrom tensorflow_federated.python.common_libs import anonymous_tuple\nfrom tensorflow_federated.python.common_libs import py_typecheck\nfrom tensorflow_federated.python.core.api import computation_base\nfrom tensorflow_federated.python.core.api import computation_types\nfrom tensorflow_federated.python.core.api import placements\nfrom tensorflow_federated.python.core.impl import compiler_pipeline\nfrom tensorflow_federated.python.core.impl import computation_building_blocks\nfrom tensorflow_federated.python.core.impl import computation_impl\nfrom tensorflow_federated.python.core.impl import context_base\nfrom tensorflow_federated.python.core.impl import dtype_utils\nfrom tensorflow_federated.python.core.impl import 
graph_utils\nfrom tensorflow_federated.python.core.impl import intrinsic_defs\nfrom tensorflow_federated.python.core.impl import placement_literals\nfrom tensorflow_federated.python.core.impl import tensorflow_deserialization\nfrom tensorflow_federated.python.core.impl import transformations\nfrom tensorflow_federated.python.core.impl import type_constructors\nfrom tensorflow_federated.python.core.impl import type_utils\n\n\nclass ComputedValue(object):\n \"\"\"A container for values computed by the reference executor.\"\"\"\n\n def __init__(self, value, type_spec):\n \"\"\"Creates a value with given raw payload `value` and TFF type `type_spec`.\n\n For performance reasons, the constructor does not check that the payload is\n of the corresponding type. It is the responsibility of the caller to do so,\n e.g., by calling the helper function `to_representation_for_type()`.\n See the definition of this function for the value representations.\n\n Args:\n value: The raw payload (the representation of the computed value), the\n exact form of which depends on the type, as describd above.\n type_spec: An instance of `tff.Type` or something convertible to it that\n describes the TFF type of this value.\n \"\"\"\n type_spec = computation_types.to_type(type_spec)\n py_typecheck.check_type(type_spec, computation_types.Type)\n self._type_signature = type_spec\n self._value = value\n\n @property\n def type_signature(self):\n return self._type_signature\n\n @property\n def value(self):\n return self._value\n\n def __str__(self):\n return 'ComputedValue({}, {})'.format(\n str(self._value), str(self._type_signature))\n\n\ndef to_representation_for_type(value, type_spec, callable_handler=None):\n \"\"\"Verifies or converts the `value` representation to match `type_spec`.\n\n This method first tries to determine whether `value` is a valid representation\n of TFF type `type_spec`. If so, it is returned unchanged. 
If not, but if it\n can be converted into a valid representation, it is converted to such, and the\n valid representation is returned. If no conversion to a valid representation\n is possible, TypeError is raised.\n\n The accepted forms of `value` for vaqrious TFF types as as follows:\n\n * For TFF tensor types listed in `dtypes.TENSOR_REPRESENTATION_TYPES`.\n\n * For TFF named tuple types, instances of `anonymous_tuple.AnonymousTuple`.\n\n * For TFF sequences, Python lists.\n\n * For TFF functional types, Python callables that accept a single argument\n that is an instance of `ComputedValue` (if the function has a parameter)\n or `None` (otherwise), and return a `ComputedValue` instance as a result.\n This function only verifies that `value` is a callable.\n\n * For TFF abstract types, there is no valid representation. The reference\n executor requires all types in an executable computation to be concrete.\n\n * For TFF placement types, the valid representations are the placement\n literals (currently only `tff.SERVER` and `tff.CLIENTS`).\n\n * For TFF federated types with `all_equal` set to `True`, the representation\n is the same as the representation of the member constituent (thus, e.g.,\n a valid representation of `int32@SERVER` is the same as that of `int32`).\n For those types that have `all_equal_` set to `False`, the representation\n is a Python list of member constituents.\n\n NOTE: This function does not attempt at validating that the sizes of lists\n that represent federated values match the corresponding placemenets. The\n cardinality analysis is a separate step, handled by the reference executor\n at a different point. 
As long as values can be packed into a Python list,\n they are accepted as they are.\n\n Args:\n value: The raw representation of a value to compare against `type_spec` and\n potentially to be converted into a canonical form for the given TFF type.\n type_spec: The TFF type, an instance of `tff.Type` or something convertible\n to it that determines what the valid representation should be.\n callable_handler: The function to invoke to handle TFF functional types. If\n this is `None`, functional types are not supported. The function must\n accept `value` and `type_spec` as arguments and return the converted valid\n representation, just as `to_representation_for_type`.\n\n Returns:\n Either `value` itself, or the `value` converted into a valid representation\n for `type_spec`.\n\n Raises:\n TypeError: If `value` is not a valid representation for given `type_spec`.\n NotImplementedError: If verification for `type_spec` is not supported.\n \"\"\"\n type_spec = computation_types.to_type(type_spec)\n py_typecheck.check_type(type_spec, computation_types.Type)\n if callable_handler is not None:\n py_typecheck.check_callable(callable_handler)\n\n # NOTE: We do not simply call `type_utils.infer_type()` on `value`, as the\n # representations of values in the reference executor are only a subset of\n # the Python types recognized by that helper function.\n\n if isinstance(type_spec, computation_types.TensorType):\n if tf.executing_eagerly() and isinstance(value, tf.Tensor):\n value = value.numpy()\n py_typecheck.check_type(value, dtype_utils.TENSOR_REPRESENTATION_TYPES)\n inferred_type_spec = type_utils.infer_type(value)\n if not type_utils.is_assignable_from(type_spec, inferred_type_spec):\n raise TypeError(\n 'The tensor type {} of the value representation does not match '\n 'the type spec {}.'.format(str(inferred_type_spec), str(type_spec)))\n return value\n elif isinstance(type_spec, computation_types.NamedTupleType):\n type_spec_elements = 
anonymous_tuple.to_elements(type_spec)\n # Special-casing unodered dictionaries to allow their elements to be fed in\n # the order in which they're defined in the named tuple type.\n if (isinstance(value, dict) and\n (set(value.keys()) == set(k for k, _ in type_spec_elements))):\n value = collections.OrderedDict(\n [(k, value[k]) for k, _ in type_spec_elements])\n value = anonymous_tuple.from_container(value)\n value_elements = anonymous_tuple.to_elements(value)\n if len(value_elements) != len(type_spec_elements):\n raise TypeError(\n 'The number of elements {} in the value tuple {} does not match the '\n 'number of elements {} in the type spec {}.'.format(\n len(value_elements), str(value), len(type_spec_elements),\n str(type_spec)))\n result_elements = []\n for index, (type_elem_name, type_elem) in enumerate(type_spec_elements):\n value_elem_name, value_elem = value_elements[index]\n if value_elem_name not in [type_elem_name, None]:\n raise TypeError(\n 'Found element named `{}` where `{}` was expected at position {} '\n 'in the value tuple. Value: {}. 
Type: {}'.format(\n value_elem_name, type_elem_name, index, value, type_spec))\n converted_value_elem = to_representation_for_type(value_elem, type_elem,\n callable_handler)\n result_elements.append((type_elem_name, converted_value_elem))\n return anonymous_tuple.AnonymousTuple(result_elements)\n elif isinstance(type_spec, computation_types.SequenceType):\n if isinstance(value, tf.data.Dataset):\n if tf.executing_eagerly():\n return [\n to_representation_for_type(v, type_spec.element, callable_handler)\n for v in value\n ]\n else:\n raise ValueError(\n 'Processing `tf.data.Datasets` outside of eager mode is not '\n 'currently supported.')\n return [\n to_representation_for_type(v, type_spec.element, callable_handler)\n for v in value\n ]\n elif isinstance(type_spec, computation_types.FunctionType):\n if callable_handler is not None:\n return callable_handler(value, type_spec)\n else:\n raise TypeError(\n 'Values that are callables have been explicitly disallowed '\n 'in this context. If you would like to supply here a function '\n 'as a parameter, please construct a computation that contains '\n 'this call.')\n elif isinstance(type_spec, computation_types.AbstractType):\n raise TypeError(\n 'Abstract types are not supported by the reference executor.')\n elif isinstance(type_spec, computation_types.PlacementType):\n py_typecheck.check_type(value, placement_literals.PlacementLiteral)\n return value\n elif isinstance(type_spec, computation_types.FederatedType):\n if type_spec.all_equal:\n return to_representation_for_type(value, type_spec.member,\n callable_handler)\n elif type_spec.placement is not placements.CLIENTS:\n raise TypeError(\n 'Unable to determine a valid value representation for a federated '\n 'type with non-equal members placed at {}.'.format(\n str(type_spec.placement)))\n elif not isinstance(value, (list, tuple)):\n raise ValueError('Please pass a list or tuple to any function that'\n ' expects a federated type placed at {};'\n ' you passed 
{}'.format(type_spec.placement, value))\n else:\n return [\n to_representation_for_type(v, type_spec.member, callable_handler)\n for v in value\n ]\n else:\n raise NotImplementedError(\n 'Unable to determine valid value representation for {} for what '\n 'is currently an unsupported TFF type {}.'.format(\n str(value), str(type_spec)))\n\n\ndef stamp_computed_value_into_graph(value, graph):\n \"\"\"Stamps `value` in `graph`.\n\n Args:\n value: An instance of `ComputedValue`.\n graph: The graph to stamp in.\n\n Returns:\n A Python object made of tensors stamped into `graph`, `tf.data.Dataset`s,\n and `anonymous_tuple.AnonymousTuple`s that structurally corresponds to the\n value passed at input.\n \"\"\"\n if value is None:\n return None\n else:\n py_typecheck.check_type(value, ComputedValue)\n value = ComputedValue(\n to_representation_for_type(value.value, value.type_signature),\n value.type_signature)\n py_typecheck.check_type(graph, tf.Graph)\n if isinstance(value.type_signature, computation_types.TensorType):\n if isinstance(value.value, np.ndarray):\n value_type = computation_types.TensorType(\n tf.dtypes.as_dtype(value.value.dtype),\n tf.TensorShape(value.value.shape))\n type_utils.check_assignable_from(value.type_signature, value_type)\n with graph.as_default():\n return tf.constant(value.value)\n else:\n with graph.as_default():\n return tf.constant(\n value.value,\n dtype=value.type_signature.dtype,\n shape=value.type_signature.shape)\n elif isinstance(value.type_signature, computation_types.NamedTupleType):\n elements = anonymous_tuple.to_elements(value.value)\n type_elements = anonymous_tuple.to_elements(value.type_signature)\n stamped_elements = []\n for idx, (k, v) in enumerate(elements):\n computed_v = ComputedValue(v, type_elements[idx][1])\n stamped_v = stamp_computed_value_into_graph(computed_v, graph)\n stamped_elements.append((k, stamped_v))\n return anonymous_tuple.AnonymousTuple(stamped_elements)\n elif isinstance(value.type_signature, 
computation_types.SequenceType):\n return graph_utils.make_data_set_from_elements(\n graph, value.value, value.type_signature.element)\n else:\n raise NotImplementedError(\n 'Unable to embed a computed value of type {} in graph.'.format(\n str(value.type_signature)))\n\n\ndef capture_computed_value_from_graph(value, type_spec):\n \"\"\"Captures `value` from a TensorFlow graph.\n\n Args:\n value: A Python object made of tensors in `graph`, `tf.data.Dataset`s,\n `anonymous_tuple.AnonymousTuple`s and other structures, to be captured as\n an instance of `ComputedValue`.\n type_spec: The type of the value to be captured.\n\n Returns:\n An instance of `ComputedValue`.\n \"\"\"\n type_spec = computation_types.to_type(type_spec)\n py_typecheck.check_type(type_spec, computation_types.Type)\n value = type_utils.to_canonical_value(value)\n return ComputedValue(to_representation_for_type(value, type_spec), type_spec)\n\n\ndef run_tensorflow(comp, arg):\n \"\"\"Runs a compiled TensorFlow computation `comp` with argument `arg`.\n\n Args:\n comp: An instance of `computation_building_blocks.CompiledComputation` with\n embedded TensorFlow code.\n arg: An instance of `ComputedValue` that represents the argument, or `None`\n if the compuation expects no argument.\n\n Returns:\n An instance of `ComputedValue` with the result.\n \"\"\"\n py_typecheck.check_type(comp, computation_building_blocks.CompiledComputation)\n if arg is not None:\n py_typecheck.check_type(arg, ComputedValue)\n with tf.Graph().as_default() as graph:\n stamped_arg = stamp_computed_value_into_graph(arg, graph)\n init_op, result = (\n tensorflow_deserialization.deserialize_and_call_tf_computation(\n comp.proto, stamped_arg, graph))\n with tf.Session(graph=graph) as sess:\n if init_op:\n sess.run(init_op)\n result_val = graph_utils.fetch_value_in_session(sess, result)\n return capture_computed_value_from_graph(result_val,\n comp.type_signature.result)\n\n\ndef numpy_cast(value, dtype, shape):\n \"\"\"Returns a Numpy 
representation of `value` for given `dtype` and `shape`.\n\n Args:\n value: A tensor value (such as a numpy or a raw Python type).\n dtype: An instance of tf.DType.\n shape: An instance of tf.TensorShape.\n\n Returns:\n The Numpy represantation of `value` that matches `dtype` and `shape`.\n\n Raises:\n TypeError: If the `value` cannot be converted to the given `dtype` and the\n desired `shape`.\n \"\"\"\n py_typecheck.check_type(dtype, tf.DType)\n py_typecheck.check_type(shape, tf.TensorShape)\n value_as_numpy_array = np.array(value, dtype=dtype.as_numpy_dtype)\n if list(value_as_numpy_array.shape) != shape.dims:\n raise TypeError('Expected shape {}, found {}.'.format(\n str(shape.dims), str(value_as_numpy_array.shape)))\n # NOTE: We don't want to make things more complicated than necessary by\n # returning the result as an array if it's just a plain scalar, so we\n # special-case this by pulling the singleton `np.ndarray`'s element out.\n if len(value_as_numpy_array.shape) > 0: # pylint: disable=g-explicit-length-test\n return value_as_numpy_array\n else:\n return value_as_numpy_array.flatten()[0]\n\n\ndef multiply_by_scalar(value, multiplier):\n \"\"\"Multiplies an instance of `ComputedValue` by a given scalar.\n\n Args:\n value: An instance of `ComputedValue` to multiply.\n multiplier: A scalar multipler.\n\n Returns:\n An instance of `ComputedValue` that represents the result of multiplication.\n \"\"\"\n py_typecheck.check_type(value, ComputedValue)\n py_typecheck.check_type(multiplier, (float, np.float32))\n if isinstance(value.type_signature, computation_types.TensorType):\n result_val = numpy_cast(value.value * multiplier,\n value.type_signature.dtype,\n value.type_signature.shape)\n return ComputedValue(result_val, value.type_signature)\n elif isinstance(value.type_signature, computation_types.NamedTupleType):\n elements = anonymous_tuple.to_elements(value.value)\n type_elements = anonymous_tuple.to_elements(value.type_signature)\n result_elements = []\n 
for idx, (k, v) in enumerate(elements):\n multiplied_v = multiply_by_scalar(\n ComputedValue(v, type_elements[idx][1]), multiplier).value\n result_elements.append((k, multiplied_v))\n return ComputedValue(\n anonymous_tuple.AnonymousTuple(result_elements), value.type_signature)\n else:\n raise NotImplementedError(\n 'Multiplying vlues of type {} by a scalar is unsupported.'.format(\n str(value.type_signature)))\n\n\ndef get_cardinalities(value):\n \"\"\"Get a dictionary mapping placements to their cardinalities from `value`.\n\n Args:\n value: An instance of `ComputationValue`.\n\n Returns:\n A dictionary from placement literals to the cardinalities of each placement.\n \"\"\"\n py_typecheck.check_type(value, ComputedValue)\n if isinstance(value.type_signature, computation_types.FederatedType):\n if value.type_signature.all_equal:\n return {}\n else:\n py_typecheck.check_type(value.value, list)\n return {value.type_signature.placement: len(value.value)}\n elif isinstance(\n value.type_signature,\n (computation_types.TensorType, computation_types.SequenceType,\n computation_types.AbstractType, computation_types.FunctionType,\n computation_types.PlacementType)):\n return {}\n elif isinstance(value.type_signature, computation_types.NamedTupleType):\n py_typecheck.check_type(value.value, anonymous_tuple.AnonymousTuple)\n result = {}\n for idx, (_, elem_type) in enumerate(\n anonymous_tuple.to_elements(value.type_signature)):\n for k, v in six.iteritems(\n get_cardinalities(ComputedValue(value.value[idx], elem_type))):\n if k not in result:\n result[k] = v\n elif result[k] != v:\n raise ValueError(\n 'Mismatching cardinalities for {}: {} vs. 
{}.'.format(\n str(k), str(result[k]), str(v)))\n return result\n else:\n raise NotImplementedError(\n 'Unable to get cardinalities from a value of TFF type {}.'.format(\n str(value.type_signature)))\n\n\nclass ComputationContext(object):\n \"\"\"Encapsulates context/state in which computations or parts thereof run.\"\"\"\n\n def __init__(self,\n parent_context=None,\n local_symbols=None,\n cardinalities=None):\n \"\"\"Constructs a new execution context.\n\n Args:\n parent_context: The parent context, or `None` if this is the root.\n local_symbols: The dictionary of local symbols defined in this context, or\n `None` if there are none. The keys (names) are of a string type, and the\n values (what the names bind to) are of type `ComputedValue`.\n cardinalities: Placements cardinalities, if defined.\n \"\"\"\n if parent_context is not None:\n py_typecheck.check_type(parent_context, ComputationContext)\n self._parent_context = parent_context\n self._local_symbols = {}\n if local_symbols is not None:\n py_typecheck.check_type(local_symbols, dict)\n for k, v in six.iteritems(local_symbols):\n py_typecheck.check_type(k, six.string_types)\n py_typecheck.check_type(v, ComputedValue)\n self._local_symbols[str(k)] = v\n if cardinalities is not None:\n py_typecheck.check_type(cardinalities, dict)\n for k, v in six.iteritems(cardinalities):\n py_typecheck.check_type(k, placement_literals.PlacementLiteral)\n py_typecheck.check_type(v, int)\n self._cardinalities = cardinalities\n else:\n self._cardinalities = None\n\n def resolve_reference(self, name):\n \"\"\"Resolves the given reference `name` in this context.\n\n Args:\n name: The string name to resolve.\n\n Returns:\n An instance of `ComputedValue` corresponding to this name.\n\n Raises:\n ValueError: If the name cannot be resolved.\n \"\"\"\n py_typecheck.check_type(name, six.string_types)\n value = self._local_symbols.get(str(name))\n if value is not None:\n return value\n elif self._parent_context is not None:\n return 
self._parent_context.resolve_reference(name)\n else:\n raise ValueError(\n 'The name \\'{}\\' is not defined in this context.'.format(name))\n\n def get_cardinality(self, placement):\n \"\"\"Returns the cardinality for `placement`.\n\n Args:\n placement: The placement, for which to return cardinality.\n \"\"\"\n py_typecheck.check_type(placement, placement_literals.PlacementLiteral)\n if self._cardinalities is not None and placement in self._cardinalities:\n return self._cardinalities[placement]\n elif self._parent_context is not None:\n return self._parent_context.get_cardinality(placement)\n else:\n raise ValueError('Unable to determine the cardinality for {}.'.format(\n str(placement)))\n\n\ndef fit_argument(arg, type_spec, context):\n \"\"\"Fits the given argument `arg` to match the given parameter `type_spec`.\n\n Args:\n arg: The argument to fit, an instance of `ComputedValue`.\n type_spec: The type of the parameter to fit to, an instance of `tff.Type` or\n something convertible to it.\n context: The context in which to perform the fitting, either an instance of\n `ComputationContext`, or `None` if unspecified.\n\n Returns:\n An instance of `ComputationValue` with the payload from `arg`, but matching\n the `type_spec` in the given context.\n\n Raises:\n TypeError: If the types mismatch.\n ValueError: If the value is invalid or does not fit the requested type.\n \"\"\"\n py_typecheck.check_type(arg, ComputedValue)\n type_spec = computation_types.to_type(type_spec)\n py_typecheck.check_type(type_spec, computation_types.Type)\n if context is not None:\n py_typecheck.check_type(context, ComputationContext)\n type_utils.check_assignable_from(type_spec, arg.type_signature)\n if arg.type_signature == type_spec:\n return arg\n elif isinstance(type_spec, computation_types.NamedTupleType):\n py_typecheck.check_type(arg.value, anonymous_tuple.AnonymousTuple)\n result_elements = []\n for idx, (elem_name, elem_type) in enumerate(\n 
anonymous_tuple.to_elements(type_spec)):\n elem_val = ComputedValue(arg.value[idx], arg.type_signature[idx])\n if elem_val != elem_type:\n elem_val = fit_argument(elem_val, elem_type, context)\n result_elements.append((elem_name, elem_val.value))\n return ComputedValue(\n anonymous_tuple.AnonymousTuple(result_elements), type_spec)\n elif isinstance(type_spec, computation_types.FederatedType):\n type_utils.check_federated_type(\n arg.type_signature, placement=type_spec.placement)\n if arg.type_signature.all_equal:\n member_val = ComputedValue(arg.value, arg.type_signature.member)\n if type_spec.member != arg.type_signature.member:\n member_val = fit_argument(member_val, type_spec.member, context)\n if type_spec.all_equal:\n return ComputedValue(member_val.value, type_spec)\n else:\n cardinality = context.get_cardinality(type_spec.placement)\n return ComputedValue([member_val.value for _ in range(cardinality)],\n type_spec)\n elif type_spec.all_equal:\n raise TypeError('Cannot fit a non all-equal {} into all-equal {}.'.format(\n str(arg.type_signature), str(type_spec)))\n else:\n py_typecheck.check_type(arg.value, list)\n\n def _fit_member_val(x):\n x_val = ComputedValue(x, arg.type_signature.member)\n return fit_argument(x_val, type_spec.member, context).value\n\n return ComputedValue([_fit_member_val(x) for x in arg.value], type_spec)\n else:\n # TODO(b/113123634): Possibly add more conversions, e.g., for tensor types.\n return arg\n\n\nclass ReferenceExecutor(context_base.Context):\n \"\"\"A simple interpreted reference executor.\n\n This executor is to be used by default in unit tests and simple applications\n such as colab notebooks and turorials. It is intended to serve as the gold\n standard of correctness for all other executors to compare against. As such,\n it is designed for simplicity and ease of reasoning about correctness, rather\n than for high performance. 
We will tolerate copying values, marshaling and\n unmarshaling when crossing TF graph boundary, etc., for the sake of keeping\n the logic minimal. The executor can be reused across multiple calls, so any\n state associated with individual executions is maintained separately from\n this class. High-performance simulations on large data sets will require a\n separate executor optimized for performance. This executor is plugged in as\n the handler of computation invocations at the top level of the context stack.\n \"\"\"\n\n def __init__(self, compiler=None):\n \"\"\"Creates a reference executor.\n\n Args:\n compiler: The compiler pipeline to be used by this executor, or `None` if\n the executor is to run without one.\n \"\"\"\n # TODO(b/113116813): Add a way to declare environmental bindings here,\n # e.g., a way to specify how data URIs are mapped to physical resources.\n\n if compiler is not None:\n py_typecheck.check_type(compiler, compiler_pipeline.CompilerPipeline)\n self._compiler = compiler\n self._intrinsic_method_dict = {\n intrinsic_defs.FEDERATED_AGGREGATE.uri:\n self._federated_aggregate,\n intrinsic_defs.FEDERATED_APPLY.uri:\n self._federated_apply,\n intrinsic_defs.FEDERATED_AVERAGE.uri:\n self._federated_average,\n intrinsic_defs.FEDERATED_BROADCAST.uri:\n self._federated_broadcast,\n intrinsic_defs.FEDERATED_COLLECT.uri:\n self._federated_collect,\n intrinsic_defs.FEDERATED_MAP.uri:\n self._federated_map,\n intrinsic_defs.FEDERATED_REDUCE.uri:\n self._federated_reduce,\n intrinsic_defs.FEDERATED_SUM.uri:\n self._federated_sum,\n intrinsic_defs.FEDERATED_VALUE_AT_CLIENTS.uri:\n self._federated_value_at_clients,\n intrinsic_defs.FEDERATED_VALUE_AT_SERVER.uri:\n self._federated_value_at_server,\n intrinsic_defs.FEDERATED_WEIGHTED_AVERAGE.uri:\n self._federated_weighted_average,\n intrinsic_defs.FEDERATED_ZIP_AT_CLIENTS.uri:\n self._federated_zip_at_clients,\n intrinsic_defs.FEDERATED_ZIP_AT_SERVER.uri:\n self._federated_zip_at_server,\n 
intrinsic_defs.GENERIC_PLUS.uri:\n self._generic_plus,\n intrinsic_defs.GENERIC_ZERO.uri:\n self._generic_zero,\n intrinsic_defs.SEQUENCE_MAP.uri:\n self._sequence_map,\n intrinsic_defs.SEQUENCE_REDUCE.uri:\n self._sequence_reduce,\n intrinsic_defs.SEQUENCE_SUM.uri:\n self._sequence_sum,\n }\n\n def ingest(self, arg, type_spec):\n\n def _handle_callable(func, func_type):\n py_typecheck.check_type(func, computation_base.Computation)\n type_utils.check_assignable_from(func.type_signature, func_type)\n return func\n\n return to_representation_for_type(arg, type_spec, _handle_callable)\n\n def invoke(self, func, arg):\n comp = self._compile(func)\n cardinalities = {}\n root_context = ComputationContext(cardinalities=cardinalities)\n computed_comp = self._compute(comp, root_context)\n type_utils.check_assignable_from(comp.type_signature,\n computed_comp.type_signature)\n if not isinstance(computed_comp.type_signature,\n computation_types.FunctionType):\n if arg is not None:\n raise TypeError('Unexpected argument {}.'.format(str(arg)))\n else:\n return computed_comp.value\n else:\n if arg is not None:\n\n def _handle_callable(func, func_type):\n py_typecheck.check_type(func, computation_base.Computation)\n type_utils.check_assignable_from(func.type_signature, func_type)\n computed_func = self._compute(self._compile(func), root_context)\n return computed_func.value\n\n computed_arg = ComputedValue(\n to_representation_for_type(\n arg, computed_comp.type_signature.parameter, _handle_callable),\n computed_comp.type_signature.parameter)\n cardinalities.update(get_cardinalities(computed_arg))\n else:\n computed_arg = None\n result = computed_comp.value(computed_arg)\n py_typecheck.check_type(result, ComputedValue)\n type_utils.check_assignable_from(comp.type_signature.result,\n result.type_signature)\n return result.value\n\n def _compile(self, comp):\n \"\"\"Compiles a `computation_base.Computation` to prepare it for execution.\n\n Args:\n comp: An instance of 
`computation_base.Computation`.\n\n Returns:\n An instance of `computation_building_blocks.ComputationBuildingBlock` that\n contains the compiled logic of `comp`.\n \"\"\"\n py_typecheck.check_type(comp, computation_base.Computation)\n if self._compiler is not None:\n comp = self._compiler.compile(comp)\n return transformations.name_compiled_computations(\n computation_building_blocks.ComputationBuildingBlock.from_proto(\n computation_impl.ComputationImpl.get_proto(comp)))\n\n def _compute(self, comp, context):\n \"\"\"Computes `comp` and returns the resulting computed value.\n\n Args:\n comp: An instance of\n `computation_building_blocks.ComputationBuildingBlock`.\n context: An instance of `ComputationContext`.\n\n Returns:\n The corresponding instance of `ComputedValue` that represents the result\n of `comp`.\n\n Raises:\n TypeError: If type mismatch occurs during the course of computation.\n ValueError: If a malformed value is encountered.\n NotImplementedError: For computation building blocks that are not yet\n supported by this executor.\n \"\"\"\n if isinstance(comp, computation_building_blocks.CompiledComputation):\n return self._compute_compiled(comp, context)\n elif isinstance(comp, computation_building_blocks.Call):\n return self._compute_call(comp, context)\n elif isinstance(comp, computation_building_blocks.Tuple):\n return self._compute_tuple(comp, context)\n elif isinstance(comp, computation_building_blocks.Reference):\n return self._compute_reference(comp, context)\n elif isinstance(comp, computation_building_blocks.Selection):\n return self._compute_selection(comp, context)\n elif isinstance(comp, computation_building_blocks.Lambda):\n return self._compute_lambda(comp, context)\n elif isinstance(comp, computation_building_blocks.Block):\n return self._compute_block(comp, context)\n elif isinstance(comp, computation_building_blocks.Intrinsic):\n return self._compute_intrinsic(comp, context)\n elif isinstance(comp, computation_building_blocks.Data):\n 
return self._compute_data(comp, context)\n elif isinstance(comp, computation_building_blocks.Placement):\n return self._compute_placement(comp, context)\n else:\n raise NotImplementedError(\n 'A computation building block of a type {} not currently recognized '\n 'by the reference executor: {}.'.format(str(type(comp)), str(comp)))\n\n def _compute_compiled(self, comp, context):\n py_typecheck.check_type(comp,\n computation_building_blocks.CompiledComputation)\n computation_oneof = comp.proto.WhichOneof('computation')\n if computation_oneof != 'tensorflow':\n raise ValueError(\n 'Expected all parsed compiled computations to be tensorflow, '\n 'but found \\'{}\\' instead.'.format(computation_oneof))\n else:\n return ComputedValue(lambda x: run_tensorflow(comp, x),\n comp.type_signature)\n\n def _compute_call(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Call)\n computed_func = self._compute(comp.function, context)\n py_typecheck.check_type(computed_func.type_signature,\n computation_types.FunctionType)\n if comp.argument is not None:\n computed_arg = self._compute(comp.argument, context)\n type_utils.check_assignable_from(computed_func.type_signature.parameter,\n computed_arg.type_signature)\n computed_arg = fit_argument(\n computed_arg, computed_func.type_signature.parameter, context)\n else:\n computed_arg = None\n result = computed_func.value(computed_arg)\n py_typecheck.check_type(result, ComputedValue)\n type_utils.check_assignable_from(computed_func.type_signature.result,\n result.type_signature)\n return result\n\n def _compute_tuple(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Tuple)\n result_elements = []\n result_type_elements = []\n for k, v in anonymous_tuple.to_elements(comp):\n computed_v = self._compute(v, context)\n type_utils.check_assignable_from(v.type_signature,\n computed_v.type_signature)\n result_elements.append((k, computed_v.value))\n result_type_elements.append((k, 
computed_v.type_signature))\n return ComputedValue(\n anonymous_tuple.AnonymousTuple(result_elements),\n computation_types.NamedTupleType(\n [(k, v) if k else v for k, v in result_type_elements]))\n\n def _compute_selection(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Selection)\n source = self._compute(comp.source, context)\n py_typecheck.check_type(source.type_signature,\n computation_types.NamedTupleType)\n py_typecheck.check_type(source.value, anonymous_tuple.AnonymousTuple)\n if comp.name is not None:\n result_value = getattr(source.value, comp.name)\n result_type = getattr(source.type_signature, comp.name)\n else:\n assert comp.index is not None\n result_value = source.value[comp.index]\n result_type = source.type_signature[comp.index]\n type_utils.check_assignable_from(comp.type_signature, result_type)\n return ComputedValue(result_value, result_type)\n\n def _compute_lambda(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Lambda)\n py_typecheck.check_type(context, ComputationContext)\n\n def _wrap(arg):\n py_typecheck.check_type(arg, ComputedValue)\n if not type_utils.is_assignable_from(comp.parameter_type,\n arg.type_signature):\n raise TypeError(\n 'Expected the type of argument {} to be {}, found {}.'.format(\n str(comp.parameter_name), str(comp.parameter_type),\n str(arg.type_signature)))\n return ComputationContext(context, {comp.parameter_name: arg})\n\n return ComputedValue(lambda x: self._compute(comp.result, _wrap(x)),\n comp.type_signature)\n\n def _compute_reference(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Reference)\n py_typecheck.check_type(context, ComputationContext)\n return context.resolve_reference(comp.name)\n\n def _compute_block(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Block)\n py_typecheck.check_type(context, ComputationContext)\n for local_name, local_comp in comp.locals:\n local_val = 
self._compute(local_comp, context)\n context = ComputationContext(context, {local_name: local_val})\n return self._compute(comp.result, context)\n\n def _compute_intrinsic(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Intrinsic)\n my_method = self._intrinsic_method_dict.get(comp.uri)\n if my_method is not None:\n # The interpretation of `my_method` depends on whether the intrinsic\n # does or does not take arguments. If it does, the method accepts the\n # argument as a `ComputedValue` instance. Otherwise, if the intrinsic\n # is not a function, but a constant (such as `GENERIC_ZERO`), the\n # method accepts the type of the result.\n if isinstance(comp.type_signature, computation_types.FunctionType):\n arg_type = comp.type_signature.parameter\n return ComputedValue(\n lambda x: my_method(fit_argument(x, arg_type, context)),\n comp.type_signature)\n else:\n return my_method(comp.type_signature)\n else:\n raise NotImplementedError('Intrinsic {} is currently unsupported.'.format(\n comp.uri))\n\n def _compute_data(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Data)\n raise NotImplementedError('Data is currently unsupported.')\n\n def _compute_placement(self, comp, context):\n py_typecheck.check_type(comp, computation_building_blocks.Placement)\n raise NotImplementedError('Placement is currently unsupported.')\n\n def _sequence_sum(self, arg):\n py_typecheck.check_type(arg.type_signature, computation_types.SequenceType)\n total = self._generic_zero(arg.type_signature.element)\n for v in arg.value:\n total = self._generic_plus(\n ComputedValue(\n anonymous_tuple.AnonymousTuple([(None, total.value), (None, v)]),\n [arg.type_signature.element, arg.type_signature.element]))\n return total\n\n def _federated_collect(self, arg):\n type_utils.check_federated_type(arg.type_signature, None,\n placements.CLIENTS, False)\n return ComputedValue(\n arg.value,\n computation_types.FederatedType(\n 
computation_types.SequenceType(arg.type_signature.member),\n placements.SERVER, True))\n\n def _federated_map(self, arg):\n mapping_type = arg.type_signature[0]\n py_typecheck.check_type(mapping_type, computation_types.FunctionType)\n type_utils.check_federated_type(arg.type_signature[1],\n mapping_type.parameter, placements.CLIENTS,\n False)\n fn = arg.value[0]\n result_val = [\n fn(ComputedValue(x, mapping_type.parameter)).value for x in arg.value[1]\n ]\n result_type = computation_types.FederatedType(mapping_type.result,\n placements.CLIENTS, False)\n return ComputedValue(result_val, result_type)\n\n def _federated_apply(self, arg):\n mapping_type = arg.type_signature[0]\n py_typecheck.check_type(mapping_type, computation_types.FunctionType)\n type_utils.check_federated_type(\n arg.type_signature[1], mapping_type.parameter, placements.SERVER, True)\n fn = arg.value[0]\n result_val = fn(ComputedValue(arg.value[1], mapping_type.parameter)).value\n result_type = computation_types.FederatedType(mapping_type.result,\n placements.SERVER, True)\n return ComputedValue(result_val, result_type)\n\n def _federated_sum(self, arg):\n type_utils.check_federated_type(arg.type_signature, None,\n placements.CLIENTS, False)\n collected_val = self._federated_collect(arg)\n federated_apply_arg = anonymous_tuple.AnonymousTuple(\n [(None, self._sequence_sum), (None, collected_val.value)])\n apply_fn_type = computation_types.FunctionType(\n computation_types.SequenceType(arg.type_signature.member),\n arg.type_signature.member)\n return self._federated_apply(\n ComputedValue(federated_apply_arg,\n [apply_fn_type, collected_val.type_signature]))\n\n def _federated_value_at_clients(self, arg):\n return ComputedValue(\n arg.value,\n computation_types.FederatedType(\n arg.type_signature, placements.CLIENTS, all_equal=True))\n\n def _federated_value_at_server(self, arg):\n return ComputedValue(\n arg.value,\n computation_types.FederatedType(\n arg.type_signature, placements.SERVER, 
all_equal=True))\n\n def _generic_zero(self, type_spec):\n if isinstance(type_spec, computation_types.TensorType):\n # TODO(b/113116813): Replace this with something more efficient, probably\n # calling some helper method from Numpy.\n with tf.Graph().as_default() as graph:\n zeros = tf.constant(0, type_spec.dtype, type_spec.shape)\n with tf.Session(graph=graph) as sess:\n zeros_val = sess.run(zeros)\n return ComputedValue(zeros_val, type_spec)\n elif isinstance(type_spec, computation_types.NamedTupleType):\n return ComputedValue(\n anonymous_tuple.AnonymousTuple(\n [(k, self._generic_zero(v).value)\n for k, v in anonymous_tuple.to_elements(type_spec)]), type_spec)\n elif isinstance(\n type_spec,\n (computation_types.SequenceType, computation_types.FunctionType,\n computation_types.AbstractType, computation_types.PlacementType)):\n raise TypeError(\n 'The generic_zero is not well-defined for TFF type {}.'.format(\n str(type_spec)))\n elif isinstance(type_spec, computation_types.FederatedType):\n if type_spec.all_equal:\n return ComputedValue(\n self._generic_zero(type_spec.member).value, type_spec)\n else:\n # TODO(b/113116813): Implement this in terms of the generic placement\n # operator once it's been added to the mix.\n raise NotImplementedError(\n 'Generic zero support for non-all_equal federated types is not '\n 'implemented yet.')\n else:\n raise NotImplementedError(\n 'Generic zero support for {} is not implemented yet.'.format(\n str(type_spec)))\n\n def _generic_plus(self, arg):\n py_typecheck.check_type(arg.type_signature,\n computation_types.NamedTupleType)\n if len(arg.type_signature) != 2:\n raise TypeError('Generic plus is undefined for tuples of size {}.'.format(\n str(len(arg.type_signature))))\n element_type = arg.type_signature[0]\n if arg.type_signature[1] != element_type:\n raise TypeError('Generic plus is undefined for two-tuples of different '\n 'types ({} vs. 
{}).'.format(\n str(element_type), str(arg.type_signature[1])))\n if isinstance(element_type, computation_types.TensorType):\n return ComputedValue(arg.value[0] + arg.value[1], element_type)\n elif isinstance(element_type, computation_types.NamedTupleType):\n py_typecheck.check_type(arg.value[0], anonymous_tuple.AnonymousTuple)\n py_typecheck.check_type(arg.value[1], anonymous_tuple.AnonymousTuple)\n result_val_elements = []\n for idx, (name, elem_type) in enumerate(\n anonymous_tuple.to_elements(element_type)):\n to_add = ComputedValue(\n anonymous_tuple.AnonymousTuple([(None, arg.value[0][idx]),\n (None, arg.value[1][idx])]),\n [elem_type, elem_type])\n add_result = self._generic_plus(to_add)\n result_val_elements.append((name, add_result.value))\n return ComputedValue(\n anonymous_tuple.AnonymousTuple(result_val_elements), element_type)\n else:\n # TODO(b/113116813): Implement the remaining cases.\n raise NotImplementedError\n\n def _sequence_map(self, arg):\n mapping_type = arg.type_signature[0]\n py_typecheck.check_type(mapping_type, computation_types.FunctionType)\n sequence_type = arg.type_signature[1]\n py_typecheck.check_type(sequence_type, computation_types.SequenceType)\n type_utils.check_assignable_from(mapping_type.parameter,\n sequence_type.element)\n fn = arg.value[0]\n result_val = [\n fn(ComputedValue(x, mapping_type.parameter)).value for x in arg.value[1]\n ]\n result_type = computation_types.SequenceType(mapping_type.result)\n return ComputedValue(result_val, result_type)\n\n def _sequence_reduce(self, arg):\n py_typecheck.check_type(arg.type_signature,\n computation_types.NamedTupleType)\n sequence_type = arg.type_signature[0]\n py_typecheck.check_type(sequence_type, computation_types.SequenceType)\n zero_type = arg.type_signature[1]\n op_type = arg.type_signature[2]\n py_typecheck.check_type(op_type, computation_types.FunctionType)\n type_utils.check_assignable_from(op_type.parameter,\n [zero_type, sequence_type.element])\n total = 
ComputedValue(arg.value[1], zero_type)\n reduce_fn = arg.value[2]\n for v in arg.value[0]:\n total = reduce_fn(\n ComputedValue(\n anonymous_tuple.AnonymousTuple([(None, total.value), (None, v)]),\n op_type.parameter))\n return total\n\n def _federated_reduce(self, arg):\n py_typecheck.check_type(arg.type_signature,\n computation_types.NamedTupleType)\n federated_type = arg.type_signature[0]\n type_utils.check_federated_type(federated_type, None, placements.CLIENTS,\n False)\n zero_type = arg.type_signature[1]\n op_type = arg.type_signature[2]\n py_typecheck.check_type(op_type, computation_types.FunctionType)\n type_utils.check_assignable_from(op_type.parameter,\n [zero_type, federated_type.member])\n total = ComputedValue(arg.value[1], zero_type)\n reduce_fn = arg.value[2]\n for v in arg.value[0]:\n total = reduce_fn(\n ComputedValue(\n anonymous_tuple.AnonymousTuple([(None, total.value), (None, v)]),\n op_type.parameter))\n return self._federated_value_at_server(total)\n\n def _federated_average(self, arg):\n type_utils.check_federated_type(arg.type_signature, None,\n placements.CLIENTS, False)\n py_typecheck.check_type(arg.value, list)\n server_sum = self._federated_sum(arg)\n unplaced_avg = multiply_by_scalar(\n ComputedValue(server_sum.value, server_sum.type_signature.member),\n 1.0 / float(len(arg.value)))\n return ComputedValue(\n unplaced_avg.value,\n type_constructors.at_server(unplaced_avg.type_signature))\n\n def _federated_zip_at_server(self, arg):\n py_typecheck.check_type(arg.type_signature,\n computation_types.NamedTupleType)\n for idx in range(len(arg.type_signature)):\n type_utils.check_federated_type(arg.type_signature[idx], None,\n placements.SERVER, True)\n return ComputedValue(\n arg.value,\n type_constructors.at_server(\n computation_types.NamedTupleType(\n [(k, v.member) if k else v.member\n for k, v in anonymous_tuple.to_elements(arg.type_signature)])))\n\n def _federated_zip_at_clients(self, arg):\n 
py_typecheck.check_type(arg.type_signature,\n computation_types.NamedTupleType)\n py_typecheck.check_type(arg.value, anonymous_tuple.AnonymousTuple)\n zip_args = []\n zip_arg_types = []\n for idx in range(len(arg.type_signature)):\n val = arg.value[idx]\n py_typecheck.check_type(val, list)\n zip_args.append(val)\n val_type = arg.type_signature[idx]\n type_utils.check_federated_type(val_type, None, placements.CLIENTS, False)\n zip_arg_types.append(val_type.member)\n zipped_val = [anonymous_tuple.from_container(x) for x in zip(*zip_args)]\n return ComputedValue(\n zipped_val,\n type_constructors.at_clients(\n computation_types.NamedTupleType(zip_arg_types)))\n\n def _federated_aggregate(self, arg):\n py_typecheck.check_type(arg.type_signature,\n computation_types.NamedTupleType)\n if len(arg.type_signature) != 5:\n raise TypeError('Expected a 5-tuple, found {}.'.format(\n str(arg.type_signature)))\n root_accumulator = self._federated_reduce(\n ComputedValue(\n anonymous_tuple.from_container([arg.value[k] for k in range(3)]),\n [arg.type_signature[k] for k in range(3)]))\n return self._federated_apply(\n ComputedValue([arg.value[4], root_accumulator.value],\n [arg.type_signature[4], root_accumulator.type_signature]))\n\n def _federated_weighted_average(self, arg):\n py_typecheck.check_type(arg.type_signature,\n computation_types.NamedTupleType)\n if len(arg.type_signature) != 2:\n raise TypeError('Expected a 2-tuple, found {}.'.format(\n str(arg.type_signature)))\n for _, v in anonymous_tuple.to_elements(arg.type_signature):\n type_utils.check_federated_type(v, None, placements.CLIENTS, False)\n if not type_utils.is_average_compatible(v.member):\n raise TypeError('Expected average-compatible args,'\n ' got {} from argument of type {}.'.format(\n str(v.member), arg.type_signature))\n v_type = arg.type_signature[0].member\n w_type = arg.type_signature[1].member\n py_typecheck.check_type(w_type, computation_types.TensorType)\n if w_type.shape.ndims != 0:\n raise 
TypeError('Expected scalar weight, got {}.'.format(str(w_type)))\n total = sum(arg.value[1])\n products_val = [\n multiply_by_scalar(ComputedValue(v, v_type), w / total).value\n for v, w in zip(arg.value[0], arg.value[1])\n ]\n return self._federated_sum(\n ComputedValue(products_val, type_constructors.at_clients(v_type)))\n\n def _federated_broadcast(self, arg):\n type_utils.check_federated_type(arg.type_signature, None, placements.SERVER,\n True)\n return ComputedValue(\n arg.value,\n computation_types.FederatedType(arg.type_signature.member,\n placements.CLIENTS, True))\n"
] | [
[
"tensorflow.Graph",
"tensorflow.TensorShape",
"tensorflow.constant",
"tensorflow.executing_eagerly",
"tensorflow.Session",
"numpy.array",
"tensorflow.dtypes.as_dtype"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dwarfer7634/AIND_CV_project | [
"46d95be3a68e1ea316b17ee833c17411f35b040e"
] | [
"utils.py"
] | [
"import os\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nfrom keras.models import load_model\r\nfrom pandas.io.parsers import read_csv\r\nfrom sklearn.utils import shuffle\r\n\r\ndef load_data(test=False):\r\n \"\"\"\r\n Loads data from FTEST if *test* is True, otherwise from FTRAIN.\r\n Important that the files are in a `data` directory\r\n \"\"\" \r\n FTRAIN = 'data/training.csv'\r\n FTEST = 'data/test.csv'\r\n fname = FTEST if test else FTRAIN\r\n df = read_csv(os.path.expanduser(fname)) # load dataframes\r\n\r\n # The Image column has pixel values separated by space; convert\r\n # the values to numpy arrays:\r\n df['Image'] = df['Image'].apply(lambda im: np.fromstring(im, sep=' '))\r\n\r\n df = df.dropna() # drop all rows that have missing values in them\r\n\r\n X = np.vstack(df['Image'].values) / 255. # scale pixel values to [0, 1]\r\n X = X.astype(np.float32)\r\n X = X.reshape(-1, 96, 96, 1) # return each images as 96 x 96 x 1\r\n\r\n if not test: # only FTRAIN has target columns\r\n y = df[df.columns[:-1]].values\r\n y = (y - 48) / 48 # scale target coordinates to [-1, 1]\r\n X, y = shuffle(X, y, random_state=42) # shuffle train data\r\n y = y.astype(np.float32)\r\n else:\r\n y = None\r\n\r\n return X, y\r\n\r\ndef plot_data(img, landmarks, axis):\r\n \"\"\"\r\n Plot image (img), along with normalized facial keypoints (landmarks)\r\n \"\"\"\r\n axis.imshow(np.squeeze(img), cmap='gray') # plot the image\r\n landmarks = landmarks * 48 + 48 # undo the normalization\r\n # Plot the keypoints\r\n axis.scatter(landmarks[0::2], \r\n landmarks[1::2], \r\n marker='o', \r\n c='c', \r\n s=40)\r\n\r\ndef plot_keypoints(img_path, \r\n face_cascade=cv2.CascadeClassifier('haarcascade_frontalface_alt.xml'),\r\n model_path='my_model.h5'):\r\n # TODO: write a function that plots keypoints on arbitrary image containing human\r\n img = cv2.imread(img_path)\r\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n faces = 
face_cascade.detectMultiScale(gray)\r\n fig = plt.figure(figsize=(5,5))\r\n ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])\r\n ax.imshow(cv2.cvtColor(cv2.imread(img_path), cv2.COLOR_BGR2RGB))\r\n\r\n if len(faces) == 0:\r\n plt.title('no faces detected')\r\n elif len(faces) > 1:\r\n plt.title('too many faces detected')\r\n for (x,y,w,h) in faces:\r\n rectangle = cv2.rectangle(img,(x,y),(x+w,y+h),(255,255,0),2)\r\n ax.imshow(cv2.cvtColor(rectangle, cv2.COLOR_BGR2RGB))\r\n elif len(faces) == 1:\r\n plt.title('one face detected')\r\n x,y,w,h = faces[0]\r\n bgr_crop = img[y:y+h, x:x+w] \r\n orig_shape_crop = bgr_crop.shape\r\n gray_crop = cv2.cvtColor(bgr_crop, cv2.COLOR_BGR2GRAY)\r\n resize_gray_crop = cv2.resize(gray_crop, (96, 96)) / 255.\r\n model = load_model(model_path)\r\n landmarks = np.squeeze(model.predict(\r\n np.expand_dims(np.expand_dims(resize_gray_crop, axis=-1), axis=0)))\r\n ax.scatter(((landmarks[0::2] * 48 + 48)*orig_shape_crop[0]/96)+x, \r\n ((landmarks[1::2] * 48 + 48)*orig_shape_crop[1]/96)+y, \r\n marker='o', c='c', s=40)\r\n plt.show()\r\n"
] | [
[
"numpy.expand_dims",
"matplotlib.pyplot.title",
"sklearn.utils.shuffle",
"numpy.squeeze",
"numpy.fromstring",
"matplotlib.pyplot.show",
"numpy.vstack",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
marcowoo7/research | [
"d31fc5dd7b830cb99257fee5e7ece3f11fece85d"
] | [
"20220111_Modeling_code_for_winter_2022_MW.py"
] | [
"# =============================================================================\r\n# Import libraries\r\n# =============================================================================\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.integrate import odeint\r\nfrom scipy.integrate import cumtrapz\r\nimport time\r\n\r\nplt.close('all')\r\n\r\n\r\n# =============================================================================\r\n# Function for acoustic contrast factor\r\n# =============================================================================\r\ndef ACF(rhop,rhof,betap,betaf):\r\n phi = (5*rhop - 2*rhof)/(2*rhop + rhof) - (betap/betaf)\r\n \r\n return phi\r\n\r\n\r\n# =============================================================================\r\n# Acoustic radiation force. This function takes an array of particle positions \r\n# and outputs an array of dimensional forces on each particle\r\n# =============================================================================\r\ndef acousticrad(x,n,W,a,betaf,p0,phi):\r\n #Solve for wavenumber as a function of nodes\r\n k = n*np.pi/W\r\n \r\n #Initialize force array \r\n Fp = np.zeros(len(x))\r\n \r\n for i in range(len(x)):\r\n Fp[i] = np.pi*phi*k*pow(a,3)*betaf*pow(p0,2)*np.sin(2*k*x[i])/3\r\n \r\n return Fp\r\n\r\n\r\n# =============================================================================\r\n# Scattering force of each particle due to every other particle. 
Inputs listed\r\n# below:\r\n# x: x coordinates of all particles\r\n# n: number of nodes\r\n# a: particle radius\r\n# W: channel width\r\n# Nx: number of particles \r\n# betaf: compressibility of the fluid\r\n# betap: compressibility of the particle (active material)\r\n# rhof: density of the fluid\r\n# rhop: density of the particle (active material)\r\n# p0: acoustic pressure\r\n# \r\n# Output of this function is an array with the scattering force on each particle\r\n# =============================================================================\r\ndef scattering(x,n,a,W,Nx,betaf,betap,rhof,rhop,p0):\r\n #Define constants\r\n k = n*np.pi/W\r\n w2 = pow(k,2)/(betaf*rhof)\r\n \r\n #Initialize an array to hold the Fs terms for each particle\r\n Fs = np.zeros(Nx)\r\n \r\n #Sum over all particles to find net force on each particle due to scattering\r\n for i in range(Nx):\r\n for j in range(Nx):\r\n if i != j:\r\n if abs(i-j) <= 15:\r\n #Set up coordinate system\r\n dij = x[j] - x[i]\r\n xij = np.average([x[j],x[i]])\r\n \r\n #Calculate squares of pressure and velocity\r\n p2 = pow(p0,2)*pow(np.cos(k*xij),2)/2\r\n v2 = pow(p0,2)*betaf*pow(np.sin(k*xij),2)/(2*rhof)\r\n \r\n #Calculate scattering force\r\n term1 = 2*pow(rhop-rhof,2)*v2/(6*rhof*pow(dij,4))\r\n term2 = w2*rhof*pow(betap - betaf,2)*p2/(9*pow(dij,2)) \r\n Fs_hold = 4*np.pi*pow(a,6)*(term1-term2)\r\n \r\n if (dij > 0):\r\n Fs[i] -= Fs_hold\r\n elif (dij < 0):\r\n Fs[i] += Fs_hold\r\n \r\n return Fs\r\n\r\n\r\n# =============================================================================\r\n# System of ODEs to solve\r\n# x is an array of particle positions, and the function iterates new position\r\n# values until there are position values for all discrete time steps\r\n# =============================================================================\r\ndef position(x,t,acousticrad,scattering,n,W,a,betaf,betap,rhof,rhop,p0,phi,Nx,eta):\r\n Fp = acousticrad(x,n,W,a,betaf,p0,phi)\r\n Fs = 
scattering(x,n,a,W,Nx,betaf,betap,rhof,rhop,p0)\r\n \r\n drag = np.zeros(Nx)\r\n for i in range(Nx):\r\n drag[i] = (Fp[i] + Fs[i])/(6*np.pi*a*eta)\r\n \r\n return drag\r\n\r\n\r\n# =============================================================================\r\n# An equation for converting the volume fraction to the number of particles \r\n# across the channel width, rounded to the nearest integer\r\n# =============================================================================\r\ndef vf2np(vf,a,W):\r\n Np = int(np.round(W/a*(3*vf/(4*np.pi))**(1/3)))\r\n \r\n return Np\r\n\r\n\r\n# =============================================================================\r\n# A function that stores the material properties of electrode active materials\r\n# and other common focusing materials\r\n# =============================================================================\r\ndef material(mat_prop):\r\n if mat_prop == 1: #Alumina in epoxy and acetone\r\n rhop = 3.95*pow(10,3) #kg/m^3\r\n rhof = 1.12*pow(10,3) #kg/m^3\r\n betap = 1.6*pow(10,-12) #1/Pa\r\n betaf = 2.0*pow(10,-10) #1/Pa\r\n elif mat_prop ==2: #Silica in water\r\n rhop = 2.5*pow(10,3) #kg/m^3\r\n rhof = 1.0*pow(10,3) #kg/m^3\r\n betap = 2.72*pow(10,-10) #1/Pa\r\n betaf = 4.54*pow(10,-10) #1/Pa\r\n elif mat_prop ==3: #NMC in NMP and PVDF (battery materials)\r\n rhop = 4.5*pow(10,3) #kg/m^3\r\n rhof = 1.03*pow(10,3) #kg/m^3\r\n betap = 7.5*pow(10,-12) #1/Pa\r\n betaf = 5.2*pow(10,-10) #1/Pa\r\n\r\n return rhop,rhof,betap,betaf\r\n\r\n\r\n# =============================================================================\r\n# Function to solve for the number of nodes given the fluid material\r\n# properties, channel width, and operational frequency\r\n# =============================================================================\r\ndef f2n(rhof,betaf,W,f):\r\n c = np.sqrt(1/(rhof*betaf))\r\n n = round(2*f*W/c)\r\n \r\n return n\r\n\r\n\r\n# 
=============================================================================\r\n# Solve for initial particle position, this option spaces particles evenly in \r\n# the channel and adjusts positions if they are too close to an antinode. This\r\n# is done because particles near the antinodes tend to get trapped and take a \r\n# long time to focus. In an experiment, particles likely have a velocity in the\r\n# direction of the channel width, and this would cause them to move away from\r\n# the antinode naturally\r\n# =============================================================================\r\ndef init_pos_2(n,Np,W,a):\r\n dW = (W - Np*2*a)/(2*(Np+1))+a\r\n x0 = np.linspace(0+dW,W-dW,Np)\r\n \r\n #Solve for anti-nodes\r\n even = np.arange(0,2*n+1,2)\r\n antinode = np.zeros(len(even))\r\n \r\n for i in range(len(even)):\r\n antinode[i] = even[i]*W/(2*n)\r\n \r\n for i in range(len(antinode)):\r\n for j in range(len(x0)):\r\n test = antinode[i] - x0[j]\r\n if abs(test) < a/4:\r\n if test > 0:\r\n x0[j] = x0[j] - a/4\r\n else:\r\n x0[j] = x0[j] + a/4\r\n \r\n return x0\r\n\r\n\r\n# =============================================================================\r\n# Solves for the focused widths and spacing widths by using the roots of the \r\n# acoustic radiation force roots. 
The function returns the following:\r\n# Outer: an array with the outer positions of the particles\r\n# Root: the location of the nodes\r\n# avg_p: the average focusing width\r\n# avg_s: the average spacing between focusing widths\r\n# =============================================================================\r\ndef solve_width(a,W,n,x0,Np,pos,M):\r\n #Solve for location of nodes across channel width\r\n odd = np.arange(1,2*n,2)\r\n root = np.zeros(len(odd))\r\n \r\n for i in range(len(odd)):\r\n root[i] = odd[i]*W/(2*n)\r\n \r\n #Define bin edges\r\n even = np.arange(0,2*n+1,2)\r\n edges = np.zeros(len(even))\r\n \r\n for i in range(len(even)):\r\n edges[i] = even[i]*W/(2*n)\r\n \r\n #Sort into bins based on edge of each focusing node\r\n split,bins = np.histogram(x0,edges)\r\n \r\n #Find particles at the edge (outer region) of each focused region\r\n outer = np.zeros(2*n)\r\n outer[0] = 1\r\n outer[-1] = Np\r\n count_1 = 0\r\n j = 0\r\n \r\n for i in range(len(outer)-1):\r\n if i != 0:\r\n if i%2 != 0:\r\n count_1 += split[j]\r\n j += 1\r\n outer[i] = count_1\r\n else:\r\n count_2 = count_1 + 1\r\n outer[i] = count_2\r\n \r\n #Solve for the focusing widths and spacing widths\r\n width = np.zeros((len(pos),2*n-1))\r\n \r\n for i in range(len(pos)):\r\n for j in range(2*n-1):\r\n width[i,j] = pos[i,int(outer[j+1]-1)] - pos[i,int(outer[j]-1)]\r\n \r\n #Solving for focusing width and spacing\r\n width_p_int = np.arange(0,2*n-1,2)\r\n width_s_int = np.arange(1,2*n-1,2)\r\n width_p = np.zeros((M,len(width_p_int)))\r\n width_s = np.zeros((M,len(width_s_int)))\r\n avg_p = np.zeros(M)\r\n avg_s = np.zeros(M)\r\n \r\n for i in range(len(width_p_int)):\r\n width_p[:,i] = (width[:,width_p_int[i]]+(2*a))*10**6\r\n \r\n for i in range(len(width_s_int)):\r\n width_s[:,i] = (width[:,width_s_int[i]]-(2*a))*10**6\r\n \r\n for i in range(M):\r\n avg_p[i] = np.average(width_p[i,:])\r\n avg_s[i] = np.average(width_s[i,:]) \r\n \r\n return outer,root,avg_p,avg_s\r\n\r\n\r\n# 
=============================================================================\r\n# Function that solves for the index of when particles are focused completely\r\n# Inputs include\r\n# tol: the percent difference desired between the final focused state \r\n# and the reported critical focusing length\r\n# avg_p: average width of focused lines\r\n# avg_s: average spacing between focused lines\r\n# The function returns the first index of when avg_p/avg_s is within the \r\n# tolerance of the final focused value\r\n# =============================================================================\r\ndef ind_solve(tol,avg_p,avg_s):\r\n ratio = avg_p/avg_s\r\n end = ratio[-1]\r\n ind = []\r\n for i in range(len(ratio)):\r\n err = abs(ratio[i]-end)/end\r\n if err <= tol:\r\n ind.append(i)\r\n \r\n return ind[0]\r\n\r\n\r\n# =============================================================================\r\n# Function that solves for the index of when particles are focused completely.\r\n# This function differs from ind_solve because it considers the position of \r\n# individual particles rather than the entire focused width.\r\n# Inputs include\r\n# tol: the percent difference desired between the final focused state \r\n# and the reported critical focusing length (or index of this)\r\n# soln: array with particle trajectories\r\n# Np: the number of particles\r\n# The function returns the following:\r\n# ind: an array of indices (that correspond to each particle) that\r\n# indicate the point at which the particle is within the \r\n# tolerance of the function\r\n# hit_tol: \r\n# =============================================================================\r\ndef ind_solve_idv(tol,soln,Np):\r\n #Initialize an array to hold indices\r\n ind = np.zeros(Np)\r\n \r\n #Iterate over particles and find indices for when particles are within \r\n #the tolerance distance from their final focused position\r\n for i in range(Np):\r\n end = soln[-1,i]\r\n hold = []\r\n \r\n for j in 
range(np.shape(soln)[0]):\r\n err = abs(soln[j,i]-end)/end\r\n if err <= tol:\r\n hold.append(j)\r\n \r\n ind[i] = hold[0]\r\n \r\n #Create an array to keep track of when particles have hit this tolerance\r\n hit = np.zeros(np.shape(soln)[0])\r\n \r\n for i in range(Np):\r\n ind_range = np.arange(ind[i],np.shape(soln)[0],1)\r\n for j in range(len(ind_range)):\r\n hit[int(ind_range[j])]+=1\r\n \r\n hit_tol = [100*x/Np for x in hit]\r\n \r\n return ind, hit_tol\r\n\r\n\r\n# =============================================================================\r\n# Main, location where program is run\r\n# =============================================================================\r\ndef main():\r\n #Timer\r\n start = time.time()\r\n \r\n #Define constants\r\n a = 15*pow(10,-6) # Particle radius (m)\r\n W = 3*pow(10,-3) # Channel width (m)\r\n vs = 0.001 # Velocity of fluid (m/s)\r\n eta = 10 # Viscosity (Pa-s) \r\n p0 = 1*pow(10,6) # Acoustic pressure (Pa) \r\n vf = 0.03 # Volume fraction (decimal) \r\n Np = vf2np(vf,a,W) # Number of particles\r\n \r\n #Specify which material properties are of interest\r\n # 1 = Alumina in Epoxy and Acetone\r\n # 2 = Silica in Water\r\n # 3 = NMC in NMP and PVDF\r\n mat_prop = 1\r\n rhop,rhof,betap,betaf = material(mat_prop)\r\n \r\n #Set up the format of plots\r\n font = {'fontname':'Helvetica'} #font style\r\n FS = 28 #font size\r\n LW = 2 #line width\r\n NS = 24 #number size\r\n \r\n #Solve for acoustic contrast factor\r\n phi = ACF(rhop, rhof, betap, betaf)\r\n print(f\"Acoustic contrast factor: {phi}\")\r\n \r\n #Set time interval \r\n t = np.arange(0,150,0.01)\r\n \r\n# =============================================================================\r\n# Plot particle trajectory through channel\r\n# =============================================================================\r\n #Frequency to node conversion\r\n f = 1.5*pow(10,6) #Frequency (Hz)\r\n n = f2n(rhof,betaf,W,f)\r\n \r\n #Set initial particle positions\r\n x0 = 
init_pos_2(n,Np,W,a)\r\n \r\n #Solve for plug flow solution\r\n plug_soln = odeint(position,x0,t, args = (acousticrad,scattering,n,W,a,betaf,betap,rhof,rhop,p0,phi,Np,eta))\r\n\r\n #Solve for roots\r\n M = np.shape(plug_soln)[0]\r\n focus,root,avg_p,avg_s = solve_width(a,W,n,x0,Np,plug_soln,M)\r\n \r\n #Mapping the area taken up by particles- plug flow\r\n for i in range(Np):\r\n y1 = plug_soln[:,i]*10**2 - a*10**2\r\n y2 = plug_soln[:,i]*10**2 + a*10**2\r\n plt.figure(6)\r\n plt.plot(t*vs*10**2,y1,color='black')\r\n plt.plot(t*vs*10**2,y2,color='black')\r\n plt.fill_between(t*vs*10**2,y1,y2,color = 'black')\r\n plt.hlines(root*10**2,0,240,colors = 'b')\r\n plt.xlabel('Focusing Length (cm)',fontsize = FS,**font)\r\n plt.ylabel('Particle Position (cm)',fontsize = FS,**font)\r\n plt.tick_params(labelsize = NS)\r\n plt.xlim((-0.05,10))\r\n plt.ylim((0,W*10**2))\r\n\r\n\r\n \r\n# =============================================================================\r\n# Plot of width ratio and pore width vs volume fraction for different nodes\r\n# =============================================================================\r\n #Note to user: Set nodes of interest and volume fraction range below \r\n n1 = f2n(rhof,betaf,W,3*pow(10,6))\r\n n2 = f2n(rhof,betaf,W,5*pow(10,6)) \r\n n3 = f2n(rhof,betaf,W,8*pow(10,6))\r\n N = [n1,n2,n3]\r\n vf_range = [0.05,0.1,0.15]\r\n \r\n for i in range(len(N)):\r\n print(N[i])\r\n \r\n #Set up arrays for holding focusing width information\r\n ratio = np.zeros(len(vf_range))\r\n width_s = np.zeros(len(vf_range))\r\n \r\n for j in range(len(vf_range)): \r\n #Calculate the number of particles and their starting positions\r\n Np = vf2np(vf_range[j],a,W)\r\n print(Np)\r\n x0 = init_pos_2(N[i],Np,W,a)\r\n \r\n #Solve for position\r\n plug_soln = odeint(position,x0,t, args = (acousticrad,scattering,N[i],W,a,betaf,betap,rhof,rhop,p0,phi,Np,eta))\r\n \r\n #Solve for roots\r\n M = np.shape(plug_soln)[0]\r\n focus,root,avg_p,avg_s = 
solve_width(a,W,N[i],x0,Np,plug_soln,M)\r\n \r\n #Put width values into array\r\n ratio[j] = avg_p[-1]/avg_s[-1]\r\n width_s[j] = avg_s[-1]\r\n \r\n plt.figure(2)\r\n plt.plot(vf_range,ratio,linewidth = 3)\r\n plt.xlim((0,0.25))\r\n plt.ylim((1,4))\r\n plt.tick_params(labelsize = NS)\r\n plt.xlabel('Volume Fraction',fontsize = FS,**font)\r\n plt.ylabel('Electrode:Pore Width',fontsize = FS,**font)\r\n plt.legend(['3 MHz','5 MHz','8 MHz'],loc = 2,prop={'size':NS})\r\n \r\n plt.figure(3)\r\n plt.plot(vf_range,width_s,linewidth = 3)\r\n plt.xlim((0,0.25))\r\n plt.ylim((5,140))\r\n plt.tick_params(labelsize = NS)\r\n plt.xlabel('Volume Fraction',fontsize = FS,**font)\r\n plt.ylabel('Pore Width (\\u03bcm)',fontsize = FS,**font)\r\n plt.legend(['3 MHz','5 MHz','8 MHz'],loc = 3,prop={'size':NS})\r\n \r\n \r\n #Timer\r\n end = time.time()\r\n total = round((end - start)/60)\r\n print(f\"Program runtime is {total} minutes.\")\r\n \r\n \r\nmain()\r\n \r\n "
] | [
[
"matplotlib.pyplot.legend",
"numpy.sqrt",
"numpy.linspace",
"scipy.integrate.odeint",
"numpy.round",
"matplotlib.pyplot.plot",
"numpy.histogram",
"numpy.arange",
"matplotlib.pyplot.hlines",
"numpy.sin",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.fill_between",
"matplotlib.pyplot.ylabel",
"numpy.cos",
"matplotlib.pyplot.xlim",
"numpy.shape",
"matplotlib.pyplot.xlabel",
"numpy.average",
"matplotlib.pyplot.tick_params"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
vsskarthik/noxim-new | [
"c5fcb547f172390cfa06988f66c3e89a2ba008af"
] | [
"bin/plot_results.py"
] | [
"import matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\nimport os\n\ndir = 'latency'\ncsv_folder = 'new_csv_files'\nfile_list = os.listdir(f'./{csv_folder}/'+dir)\nfile_list.sort()\nfor i,j in enumerate(file_list):\n print(f'{i}) {j}')\n\nch = list(map(int,input('Space Seperated File Numbers: ').split()))\n\nfiles = []\n\nfor i in ch:\n files.append(file_list[i])\n\n\nrate_list = []\nlatency_list = []\n\nfor i in files:\n df = pd.read_csv(f'./{csv_folder}/'+dir+'/'+i,header=None)\n rates = df.iloc[:,0].values\n latencies = df.iloc[:,1].values\n rate_list.append(rates)\n latency_list.append(latencies)\n #print(rates,latencies)\n\nfor x,y in zip(rate_list,latency_list):\n plt.plot(x,y,'-o')\n\nlegends = [x[:x.rindex(\"_\")] for x in files]\nlegends = [x[:x.rindex(\"_\")] for x in legends]\nlegends = [x[:x.rindex(\"_\")] if('BIT' in x) else x for x in legends]\ntraffic = files[0]\ntraffic = traffic[:traffic.rindex('_')]\nif('BIT' in traffic):\n idx = traffic[:traffic.rindex('_')].rindex('_')\n traffic = traffic[idx+1:]\nelse:\n traffic = traffic[traffic.rindex('_')+1:]\n\n\nplt.legend(legends)\nplt.xlabel('Packet Injection Rates(packet/cycle/node)')\nplt.ylabel('Average Packet Latency(cycles)')\nplt.title(traffic)\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"pandas.read_csv",
"matplotlib.pyplot.title",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SohamChattopadhyayEE/Medical-image-segmentation | [
"3e36fd6ba4afa6320bcffdb1414543177ada78f3"
] | [
"util/dice_coeff.py"
] | [
"import torch\r\nfrom torch.autograd import Function\r\n\r\n\r\nclass DiceCoeff(Function):\r\n \"\"\"Dice coeff for individual examples\"\"\"\r\n\r\n def forward(self, input, target):\r\n self.save_for_backward(input, target)\r\n eps = 0.0001\r\n self.inter = torch.dot(input.view(-1), target.view(-1))\r\n self.union = torch.sum(input) + torch.sum(target) + eps\r\n\r\n t = (2 * self.inter.float() + eps) / self.union.float()\r\n return t\r\n\r\n # This function has only a single output, so it gets only one gradient\r\n def backward(self, grad_output):\r\n\r\n input, target = self.saved_variables\r\n grad_input = grad_target = None\r\n\r\n if self.needs_input_grad[0]:\r\n grad_input = grad_output * 2 * (target * self.union - self.inter) \\\r\n / (self.union * self.union)\r\n if self.needs_input_grad[1]:\r\n grad_target = None\r\n\r\n return grad_input, grad_target\r\n\r\n\r\ndef dice_coeff(input, target):\r\n \"\"\"Dice coeff for batches\"\"\"\r\n if input.is_cuda:\r\n s = torch.FloatTensor(1).cuda().zero_()\r\n else:\r\n s = torch.FloatTensor(1).zero_()\r\n\r\n for i, c in enumerate(zip(input, target)):\r\n s = s + DiceCoeff().forward(c[0], c[1])\r\n\r\n return s / (i + 1)\r\n"
] | [
[
"torch.FloatTensor",
"torch.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
chengjianglong/clab | [
"504a111a5ffbaa119dc64b30c8f7cb14288923a8"
] | [
"clab/util/fnameutil.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nProcessing for filenames. The logic is relatively hacky.\n\npip install pygtrie\n\"\"\"\nfrom __future__ import print_function, division\nfrom os.path import commonprefix, isdir, dirname, relpath, splitext\nfrom collections import deque\nimport pygtrie\n\n\ndef shortest_unique_prefixes(items, sep=None, allow_simple=True):\n \"\"\"\n The shortest unique prefix algorithm.\n\n Args:\n items (list of str): returned prefixes will be unique wrt this set\n sep (str): if specified, all characters between separators are treated\n as a single symbol. Makes the algo much faster.\n allow_simple (bool): if True tries to construct a simple feasible\n solution before resorting to the optimal trie algorithm.\n\n Returns:\n list of str: a prefix for each item that uniquely identifies it\n wrt to the original items.\n\n References:\n http://www.geeksforgeeks.org/find-all-shortest-unique-prefixes-to-represent-each-word-in-a-given-list/\n https://github.com/Briaares/InterviewBit/blob/master/Level6/Shortest%20Unique%20Prefix.cpp\n\n Requires:\n pip install pygtrie\n\n Doctest:\n >>> from clab.fnameutil import *\n >>> items = [\"zebra\", \"dog\", \"duck\", \"dove\"]\n >>> shortest_unique_prefixes(items)\n ['z', 'dog', 'du', 'dov']\n\n Timeing:\n >>> # make numbers larger to stress test\n >>> # L = max length of a string, N = number of strings,\n >>> # C = smallest gaurenteed common length\n >>> # (the setting N=10000, L=100, C=20 is feasible we are good)\n >>> import random\n >>> def make_data(N, L, C):\n >>> rng = random.Random(0)\n >>> return [''.join(['a' if i < C else chr(rng.randint(97, 122))\n >>> for i in range(L)]) for _ in range(N)]\n >>> items = make_data(N=1000, L=10, C=0)\n >>> %timeit shortest_unique_prefixes(items)\n 17.5 ms ± 244 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n >>> items = make_data(N=1000, L=100, C=0)\n >>> %timeit shortest_unique_prefixes(items)\n 141 ms ± 1.05 ms per loop (mean ± std. dev. 
of 7 runs, 10 loops each)\n >>> items = make_data(N=1000, L=100, C=70)\n >>> %timeit shortest_unique_prefixes(items)\n 141 ms ± 1.05 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n >>> items = make_data(N=10000, L=250, C=20)\n >>> %timeit shortest_unique_prefixes(items)\n 3.55 s ± 23 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n \"\"\"\n if len(set(items)) != len(items):\n raise ValueError('inputs must be unique')\n\n # construct trie\n if sep is None:\n trie = pygtrie.CharTrie.fromkeys(items, value=0)\n else:\n # In some simple cases we can avoid constructing a trie\n if allow_simple:\n tokens = [item.split(sep) for item in items]\n simple_solution = [t[0] for t in tokens]\n if len(simple_solution) == len(set(simple_solution)):\n return simple_solution\n for i in range(2, 10):\n # print('return simple solution at i = {!r}'.format(i))\n simple_solution = ['-'.join(t[:i]) for t in tokens]\n if len(simple_solution) == len(set(simple_solution)):\n return simple_solution\n\n trie = pygtrie.StringTrie.fromkeys(items, value=0, separator=sep)\n\n # Set the value (frequency) of all nodes to zero.\n for node in _trie_iternodes(trie):\n node.value = 0\n\n # For each item trace its path and increment frequencies\n for item in items:\n final_node, trace = trie._get_node(item)\n for key, node in trace:\n node.value += 1\n\n # if not isinstance(node.value, int):\n # node.value = 0\n\n # Query for the first prefix with frequency 1 for each item.\n # This is the shortest unique prefix over all items.\n unique = []\n for item in items:\n freq = None\n for prefix, freq in trie.prefixes(item):\n if freq == 1:\n break\n assert freq == 1, 'item={} has no unique prefix'.format(item)\n unique.append(prefix)\n return unique\n\n\ndef _trie_iternodes(self):\n \"\"\"\n Generates all nodes in the trie\n\n # Hack into the internal structure and insert frequencies at each node\n \"\"\"\n stack = deque([[self._root]])\n while stack:\n for node in stack.pop():\n yield node\n 
stack.append(node.children.values())\n\n\ndef shortest_unique_suffixes(items, sep=None):\n \"\"\"\n Example:\n >>> from clab.fnameutil import *\n >>> items = [\"zebra\", \"dog\", \"duck\", \"dove\"]\n >>> shortest_unique_suffixes(items)\n ['a', 'g', 'k', 'e']\n\n Example:\n >>> from clab.fnameutil import *\n >>> items = [\"aa/bb/cc\", \"aa/bb/bc\", \"aa/bb/dc\", \"aa/cc/cc\"]\n >>> shortest_unique_suffixes(items)\n ['a', 'g', 'k', 'e']\n \"\"\"\n snoitpo = [p[::-1] for p in items]\n sexiffus = shortest_unique_prefixes(snoitpo, sep=sep)\n suffixes = [s[::-1] for s in sexiffus]\n return suffixes\n\n\n# def _align_fallback(paths1, paths2):\n# \"\"\"\n# Ignore:\n# >>> import itertools as it\n# >>> from clab.util.fnameutil import *\n# >>> from clab.util.fnameutil import _align_fallback, _safepaths\n# >>> def _make_input(fmt, n=10):\n# >>> for i in range(n):\n# >>> yield (fmt.format(id=i, type='im'), fmt.format(id=i, type='gt'))\n# >>> #yield (fmt.format(id=i, type='im'), fmt.format(id=i, type='gt'))\n# >>> #\n# >>> n = 4\n# >>> paths1, paths2 = map(list, zip(*it.chain(\n# >>> _make_input('{type}/{id}.png', n=n),\n# >>> _make_input('{id}/{type}.png', n=n),\n# >>> #_make_input('{type}/{id}-{type}.png', n=n),\n# >>> #_make_input('{type}/{type}-{id}.png', n=n),\n# >>> #_make_input('{id}/{type}-{id}.png', n=n),\n# >>> #_make_input('{id}/{id}-{type}.png', n=n),\n# >>> )))\n# >>> np.random.shuffle(paths2)\n# \"\"\"\n# import numpy as np\n# import editdistance\n\n# safe_paths1 = _safepaths(paths1)\n# safe_paths2 = _safepaths(paths2)\n\n# # initialize a cost matrix\n# shape = (len(safe_paths1), len(safe_paths2))\n# cost_matrix = np.full(shape, fill_value=np.inf)\n\n# # Can we come up with the right distance function?\n# # edit-distance wont work for long type specifiers\n# # does tokenized strings help?\n# import re\n# tokens1 = [re.split('[-.]', p) for p in safe_paths1]\n# tokens2 = [re.split('[-.]', p) for p in safe_paths2]\n\n# import ubelt as ub\n# # import itertools as 
it\n# # TODO: use frequency weights\n# # token_freq = ub.dict_hist(it.chain(*(tokens1 + tokens2)))\n# # token_weights = ub.map_vals(lambda x: 1 / x, token_freq)\n\n# # The right distance function might be to weight the disagree bit by the\n# # frequency of the token in the dataset.\n\n# # only compute one half of the triangle\n# idxs1, idxs2 = np.triu_indices(len(safe_paths1), k=0)\n# distances = [\n# editdistance.eval(tokens1[i], tokens2[j])\n# for i, j in zip(idxs1, idxs2)\n# ]\n# cost_matrix[(idxs1, idxs2)] = distances\n\n# # make costs symmetric\n# cost_matrix = np.minimum(cost_matrix.T, cost_matrix)\n\n# import scipy.optimize\n# assign = scipy.optimize.linear_sum_assignment(cost_matrix)\n\n# sortx = assign[1][assign[0].argsort()]\n\n# import ubelt as ub\n# list(ub.take(paths2, sortx))\n\n\ndef dumpsafe(paths, repl='<sl>'):\n \"\"\"\n enforces that filenames will not conflict.\n Removes common the common prefix, and replaces slashes with <sl>\n\n >>> paths = ['foo/{:04d}/{:04d}'.format(i, j) for i in range(2) for j in range(20)]\n >>> list(dumpsafe(paths, '-'))\n \"\"\"\n common_pref = commonprefix(paths)\n if not isdir(common_pref):\n im_pref = dirname(common_pref)\n if common_pref[len(im_pref):len(im_pref) + 1] == '/':\n im_pref += '/'\n elif common_pref[len(im_pref):len(im_pref) + 1] == '\\\\':\n im_pref += '\\\\'\n else:\n im_pref = common_pref\n\n start = len(im_pref)\n dump_paths = (\n p[start:].replace('/', repl).replace('\\\\', repl) # faster\n # relpath(p, im_pref).replace('/', repl).replace('\\\\', repl)\n for p in paths\n )\n return dump_paths\n\n\ndef _fast_name_we(fname):\n # Assume that extensions are no more than 7 chars for extra speed\n pos = fname.rfind('.', -7)\n return fname if pos == -1 else fname[:pos]\n\n\ndef _fast_basename_we(fname):\n slashpos = fname.rfind('/')\n base = fname if slashpos == -1 else fname[slashpos + 1:]\n pos = base.rfind('.', -slashpos)\n base_we = base if pos == -1 else base[:pos]\n return base_we\n\n\ndef 
_safepaths(paths):\n \"\"\"\n x = '/home/local/KHQ/jon.crall/code/clab/clab/live/urban_train.py'\n import re\n %timeit splitext(x.replace('<sl>', '-').replace('_', '-'))[0]\n %timeit splitext(re.sub('<sl>|_', '-', x))\n %timeit x[:x.rfind('.')].replace('<sl>', '-').replace('_', '-')\n %timeit _fast_name_we(x)\n %timeit x[:x.rfind('.')]\n\n >>> paths = ['foo/{:04d}/{:04d}'.format(i, j) for i in range(2) for j in range(20)]\n >>> _safepaths(paths)\n \"\"\"\n safe_paths = [\n # faster than splitext\n _fast_name_we(x).replace('_', '-').replace('<sl>', '-')\n # splitext(x.replace('<sl>', '-').replace('_', '-'))[0]\n for x in dumpsafe(paths, repl='-')\n ]\n return safe_paths\n\n\ndef align_paths(paths1, paths2):\n \"\"\"\n return path2 in the order of path1\n\n This function will only work where file types (i.e. image / groundtruth)\n are specified by EITHER a path prefix XOR a path suffix (note this is an\n exclusive or. do not mix prefixes and suffixes), either as part of a\n filename or parent directory. 
In the case of a filename it is assumped this\n \"type identifier\" is separated from the rest of the path by an underscore\n or hyphen.\n\n paths1, paths2 = gt_paths, pred_paths\n\n Doctest:\n >>> from clab.util.fnameutil import *\n >>> def test_gt_arrangements(paths1, paths2, paths2_):\n >>> # no matter what order paths2_ comes in, it should align with the groundtruth\n >>> assert align_paths(paths1, paths2_) == paths2\n >>> assert align_paths(paths1[::-1], paths2_) == paths2[::-1]\n >>> assert align_paths(paths1[0::2] + paths1[1::2], paths2_) == paths2[0::2] + paths2[1::2]\n >>> sortx = np.arange(len(paths1))\n >>> np.random.shuffle(sortx)\n >>> assert align_paths(list(np.take(paths1, sortx)), paths2_) == list(np.take(paths2, sortx))\n >>> #\n >>> def test_input_arrangements(paths1, paths2):\n >>> paths2_ = paths2.copy()\n >>> test_gt_arrangements(paths1, paths2, paths2_)\n >>> test_gt_arrangements(paths1, paths2, paths2_[::-1])\n >>> np.random.shuffle(paths2_)\n >>> test_gt_arrangements(paths1, paths2, paths2_)\n >>> paths1 = ['foo/{:04d}/{:04d}'.format(i, j) for i in range(2) for j in range(20)]\n >>> paths2 = ['bar/{:04d}/{:04d}'.format(i, j) for i in range(2) for j in range(20)]\n >>> test_input_arrangements(paths1, paths2)\n >>> paths1 = ['foo/{:04d}/{:04d}'.format(i, j) for i in range(2) for j in range(20)]\n >>> paths2 = ['bar<sl>{:04d}<sl>{:04d}'.format(i, j) for i in range(2) for j in range(20)]\n >>> test_input_arrangements(paths1, paths2)\n\n Speed:\n >>> import ubelt as ub\n >>> paths1 = [ub.truepath('~/foo/{:04d}/{:04d}').format(i, j) for i in range(2) for j in range(10000)]\n >>> paths2 = [ub.truepath('~/bar/{:04d}/{:04d}').format(i, j) for i in range(2) for j in range(10000)]\n >>> np.random.shuffle(paths2)\n >>> aligned = align_paths(paths1, paths2)\n\n items = [p[::-1] for p in _safepaths(paths1)]\n\n Ignore:\n >>> # pathological case (can we support this?)\n >>> aligned = [\n >>> ('ims/aaa.png', 'gts/aaa.png'),\n >>> ('ims/bbb.png', 
'gts/bbb.png'),\n >>> ('ims/ccc.png', 'gts/ccc.png'),\n >>> # ---\n >>> ('aaa/im.png', 'aaa/gt.png'),\n >>> ('bbb/im.png', 'bbb/gt.png'),\n >>> ('ccc/im.png', 'ccc/gt.png'),\n >>> # ---\n >>> ('ims/im-aaa.png', 'gts/gt-aaa.png'),\n >>> ('ims/im-bbb.png', 'gts/gt-bbb.png'),\n >>> ('ims/im-ccc.png', 'gts/gt-ccc.png'),\n >>> # ---\n >>> ('ims/aaa-im.png', 'gts/aaa-gt.png'),\n >>> ('ims/bbb-im.png', 'gts/bbb-gt.png'),\n >>> ('ims/ccc-im.png', 'gts/ccc-gt.png'),\n >>> ]\n >>> paths1, paths2 = zip(*aligned)\n\n \"\"\"\n\n def comparable_unique_path_ids(paths1, paths2):\n \"\"\"\n Given two unordered sets of paths (that are assumed to have some unknown\n correspondence) we find a unique id for each path in each set such that\n they can be aligned.\n \"\"\"\n assert len(paths1) == len(paths2), (\n 'cannot align unequal no of items {} != {}.'.format(len(paths1), len(paths2)))\n\n do_quick_check = True\n if do_quick_check:\n # First check the simple thing: do they have unique corresponding\n # basenames. 
If not we do something a bit more complex.\n simple_unique1 = list(map(_fast_basename_we, paths1))\n simple_unique_set1 = set(simple_unique1)\n if len(simple_unique_set1) == len(paths1):\n simple_unique2 = list(map(_fast_basename_we, paths2))\n simple_unique_set2 = set(simple_unique2)\n if simple_unique_set2 == simple_unique_set1:\n return simple_unique1, simple_unique2\n\n safe_paths1 = _safepaths(paths1)\n safe_paths2 = _safepaths(paths2)\n\n # unique identifiers that should be comparable\n unique1 = shortest_unique_suffixes(safe_paths1, sep='-')\n unique2 = shortest_unique_suffixes(safe_paths2, sep='-')\n\n def not_comparable_msg():\n return '\\n'.join([\n 'paths are not comparable'\n 'safe_paths1 = {}'.format(safe_paths1[0:3]),\n 'safe_paths2 = {}'.format(safe_paths1[0:3]),\n 'paths1 = {}'.format(safe_paths1[0:3]),\n 'paths2 = {}'.format(safe_paths1[0:3]),\n 'unique1 = {}'.format(unique1[0:3]),\n 'unique2 = {}'.format(unique2[0:3]),\n ])\n\n try:\n # Assert these are unique identifiers common between paths\n assert sorted(set(unique1)) == sorted(unique1), not_comparable_msg()\n assert sorted(set(unique2)) == sorted(unique2), not_comparable_msg()\n assert sorted(unique1) == sorted(unique2), not_comparable_msg()\n except AssertionError:\n unique1 = shortest_unique_prefixes(safe_paths1, sep='-')\n unique2 = shortest_unique_prefixes(safe_paths2, sep='-')\n # Assert these are unique identifiers common between paths\n assert sorted(set(unique1)) == sorted(unique1), not_comparable_msg()\n assert sorted(set(unique2)) == sorted(unique2), not_comparable_msg()\n assert sorted(unique1) == sorted(unique2), not_comparable_msg()\n return unique1, unique2\n\n import numpy as np\n unique1, unique2 = comparable_unique_path_ids(paths1, paths2)\n\n lookup = {k: v for v, k in enumerate(unique1)}\n sortx = np.argsort([lookup[u] for u in unique2])\n\n sorted_paths2 = [paths2[x] for x in sortx]\n return sorted_paths2\n\n\ndef check_aligned(paths1, paths2):\n from os.path import 
basename\n if len(paths1) != len(paths2):\n return False\n\n # Try to short circuit common cases\n basenames1 = map(basename, paths1)\n basenames2 = map(basename, paths2)\n if all(p1 == p2 for p1, p2 in zip(basenames1, basenames2)):\n return True\n\n try:\n # Full case\n aligned_paths2 = align_paths(paths1, paths2)\n except AssertionError:\n return False\n return aligned_paths2 == paths2\n"
] | [
[
"numpy.argsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CRISPRJWCHOI/IndelSearcher | [
"5824069868499bfbbd74582d5dc7a9a64a15035b"
] | [
"Random_sequence_generator.py"
] | [
"#!/usr/bin/env python\r\n\r\nimport sys\r\nimport pdb\r\nimport numpy as np\r\n\r\nnumber=int(sys.argv[1])\r\nlength=int(sys.argv[2])\r\nprobA=float(sys.argv[3])\r\nprobT=float(sys.argv[4])\r\nprobC=float(sys.argv[5])\r\nprobG=float(sys.argv[6])\r\nout=sys.argv[7]\r\n# m rows\r\n# n columns\r\n\r\ndef sequence(m,n):\r\n out2 = np.empty(shape=(0,0))\r\n nucleotide = list('ATCG')\r\n\r\n while m > np.shape(out2)[0]:\r\n out1 = np.random.choice(nucleotide, m*n, p=[probA,probT,probC,probG])\r\n out1 = out1.reshape(m,n)\r\n out2 = np.unique(out1, axis=0)\r\n\r\n return out2\r\n\r\n\r\nnp.random.seed(1)\r\n\r\ndef Main():\r\n\r\n with open(out, 'w') as Output:\r\n for lCol in sequence(number, length):\r\n #print(list(lCol))\r\n #pdb.set_trace()\r\n Output.write(''.join(lCol.tolist()) +'\\n')\r\n\r\n #np.savetxt(out, sequence(number,length), delimiter='\\t')\r\n\r\nMain()\r\n\r\n"
] | [
[
"numpy.random.seed",
"numpy.unique",
"numpy.random.choice",
"numpy.shape",
"numpy.empty"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.